text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` #import matplotlib.pyplot as plt #import seaborn as sns import pandas as pd import numpy as np import random as rnd from sklearn.cross_validation import KFold, cross_val_score # machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier train_=pd.read_csv('../final.csv') validate_=pd.read_csv('../validate_allcols.csv') #test=pd.read_csv('../testwDSM.csv') characters = pd.read_csv('chars.csv', sep=',') train_.shape, validate_.shape, #test.shape train = train_.query('DSMCRIT > 13 and DSMCRIT < 20') validate = validate_.query('DSMCRIT > 13 and DSMCRIT < 20') #print train['DSMCRIT'].value_counts() print train.shape train['DETCRIM'].replace(to_replace=[-9], value = 0, inplace=True) train['DETNLF'].replace(to_replace=[-9], value = 0, inplace=True) train['IDU'].replace(to_replace=[-9], value = 0, inplace=True) train.ix[train.GENDER.isin([1]), 'PREG'] = 2 train.ix[train.SUB2.isin([1]), 'FREQ2'] = 0 train.ix[train.SUB2.isin([1]), 'FRSTUSE2'] = 0 train.ix[train.SUB2.isin([1]), 'ROUTE2'] = 0 train.ix[train.SUB3.isin([1]), 'FREQ3'] = 0 train.ix[train.SUB3.isin([1]), 'FRSTUSE3'] = 0 train.ix[train.SUB3.isin([1]), 'ROUTE3'] = 0 train = train[retain_list + ['DSMCRIT']] train = train[(train >= 0).all(1)] print train.shape #train.describe() #train = train.sample(20000) #validate = validate.sample(6000) train.shape, #validate.shape, #validate.head(2) #train = train.query('SUB1 <= 10').query('SUB2 <= 10') #validate = validate.query('SUB1 <= 10').query('SUB2 <= 10') drop_list = ['DSMCRIT', #'NUMSUBS' ] drop_list_select = ['RACE', 'PREG', 'ARRESTS', 'PSYPROB', 'DETNLF', 'ETHNIC', 'MARSTAT', 'GENDER', 'EDUC' ,'LIVARAG', 'EMPLOY', 'SUB3'] retain_list = 
['RACE','PCPFLG','PRIMINC','LIVARAG','BENZFLG','HLTHINS','GENDER','ROUTE3','PRIMPAY', 'MARSTAT','PSYPROB','ROUTE2','EMPLOY','SUB2','FRSTUSE3','FREQ3','FRSTUSE2','OTHERFLG', 'EDUC','FREQ2','FREQ1','YEAR', 'PSOURCE','DETCRIM','DIVISION','REGION','NOPRIOR','NUMSUBS','ALCDRUG', 'METHUSE','FRSTUSE1','AGE','COKEFLG','OPSYNFLG','IDU','SERVSETA','ROUTE1','MARFLG', 'MTHAMFLG','HERFLG', 'ALCFLG','SUB1'] X_train = train[retain_list] X_c = characters[retain_list] Y_c = characters["DSMCRIT"] #X_train = train.drop(drop_list + drop_list_select, axis=1) Y_train = train["DSMCRIT"] #X_validate = validate.drop(drop_list + drop_list_select, axis=1) #Y_validate = validate["DSMCRIT"] #X_test = test.drop(drop_list, axis=1) X_train.shape, #X_validate.shape, #X_test.shape print X_train.columns.tolist() #one hot from sklearn import preprocessing # 1. INSTANTIATE enc = preprocessing.OneHotEncoder() # 2. FIT enc.fit(X_train) # 3. Transform onehotlabels = enc.transform(X_train).toarray() X_train = onehotlabels #onehotlabels = enc.transform(X_validate).toarray() #X_validate = onehotlabels X_char = enc.transform(X_c).toarray() print X_train.shape, X_char.shape#X_validate.shape #kfold kf = 3 # Logistic Regression logreg = LogisticRegression(n_jobs=-1) logreg.fit(X_train, Y_train) #Y_pred = logreg.predict(X_test) l_acc_log = cross_val_score(logreg, X_train, Y_train, cv=kf) acc_log = round(np.mean(l_acc_log), 3) l_acc_log = ['%.3f' % elem for elem in l_acc_log] print l_acc_log print acc_log yp_char = logreg.predict_proba(X_char) print yp_char #print Y_c # Random Forest (slow) random_forest = RandomForestClassifier(n_estimators=200, max_depth=20, n_jobs=-1) random_forest.fit(X_train, Y_train) #Y_pred = random_forest.predict(X_test) l_acc_random_forest = cross_val_score(random_forest, X_train, Y_train, cv=kf) acc_random_forest = round(np.mean(l_acc_random_forest), 3) l_acc_random_forest = ['%.3f' % elem for elem in l_acc_random_forest] print l_acc_random_forest print acc_random_forest yp_char = 
random_forest.predict_proba(X_char) print yp_char #print Y_c ```
github_jupyter
``` #### Projeto: Desafio ZAP #### Programa para Precificar o Dataset source-4-ds-test.json do ZAP com os Modelos gerados com hiperparâmetros iniciais #### Autor: Rodolfo Bugarin import pandas as pd import numpy as np import sklearn as sk import pickle pd.options.display.float_format = '{:,.4f}'.format # # Carregar o Dataframe # df = pickle.load(open('source-4-ds-test.pickle', 'rb')) df_original = pickle.load(open('source-4-ds-test.pickle', 'rb')) pd.set_option('display.max_columns', 999) df.head(n=3) # # Tratamento de Dados # # O Desafio ZAP pede estimar um preço de venda para os apartamentos no dataset de teste # Desta forma garantimos termos no dataframe somente os imóveis "apartamento" e que estejam à venda (ou ambos) df.shape df.drop(df[df['pricinginfos_businesstype'] == 'RENTAL'].index, inplace = True) df.drop(df[df['unittypes'] != 'APARTMENT'].index, inplace = True) df.shape # Criar a coluna Zona com base na coluna address_locationid e address_zone import re # Função para extrair zonas de São Paulo def Zona_names(Zona_name): if re.search('\BR>Sao Paulo>NULL>Sao Paulo>.*', Zona_name): Zona_name = Zona_name[28:] if re.search('\>.*', Zona_name): pos = re.search('\>.*', Zona_name).start() Zona_name = Zona_name[:pos] return Zona_name else: return "" # Criação da nova coluna Zona df['Zona'] = df['address_locationid'].apply(Zona_names) # Print the updated dataframe df[['address_locationid', 'Zona']].head() df.groupby('Zona').Zona.count() df[df['Zona'] ==""].groupby('address_zone').address_zone.count() # Nos casos em que a nova coluna "Zona" ficou com contéudo "", preencher com o contéu da coluna "address_zone" for item in df.iterrows(): if item[1].Zona == "": df.loc[item[0], 'Zona'] = item[1].address_zone # Nos casos em que a nova coluna "Zona" está preenhida com "Centro", alterar para "Zona Centro" df['Zona'] = df['Zona'].apply(lambda x: "Zona Centro" if x == 'Centro' else x) df.groupby('Zona').Zona.count() df.groupby('publicationtype').publicationtype.count() # # 
Converter as colunas categóricas em Dummies # import re #### Função para remover os espaços e caracteres especiais. def arrumar_string(v_string): novo_string = v_string.replace(' ', '_') novo_string = novo_string.replace('.', '') novo_string = novo_string.replace('(', '') novo_string = novo_string.replace(')', '') novo_string = novo_string.replace('-', '') novo_string = re.sub("\d", "x", novo_string) return novo_string df.pricinginfos_businesstype = df.pricinginfos_businesstype.apply(arrumar_string) df['pricinginfos_businesstype'] = df['pricinginfos_businesstype'].apply(lambda x: "SemBusinessDefinido" if x == "" else x) df_aux = pd.get_dummies(df['pricinginfos_businesstype']) df = pd.concat([df, df_aux], axis=1) df.publicationtype = df.publicationtype.apply(arrumar_string) df['publicationtype'] = df['publicationtype'].apply(lambda x: "SemPublicationDefinido" if x == "" else x) df_aux = pd.get_dummies(df['publicationtype']) df = pd.concat([df, df_aux], axis=1) df.Zona = df.Zona.apply(arrumar_string) df['Zona'] = df['Zona'].apply(lambda x: "SemZonaDefinida" if x == "" else x) df_aux = pd.get_dummies(df['Zona']) df = pd.concat([df, df_aux], axis=1) # # Eliminar as colunas que não serão utilizadas no modelo # # Eliminar a coluna target, variável a ser explicada. 
(df.drop(['pricinginfos_price'], axis=1, inplace=True)) # Todas os imóveis são da cidade de São Paulo, então podemos remover cidade, estado e país (df.drop(['address_city', 'address_country', 'address_state'], axis=1, inplace=True)) # Eliminar as colunas tem quase todos os valores vazios (df.drop(['address_district', 'pricinginfos_period'], axis=1, inplace=True)) # A coluna "address_zone" e "address_locationid" não são mais necessária, pois foram substituídas pela coluna "Zona" (df.drop(['address_zone', 'address_locationid'], axis=1, inplace=True)) # Utilizamos a latitude e longitude como vetor de localização do imóvel, então todos os campos de endereço devem ser removidos (df.drop(['address_neighborhood', 'address_street', 'address_streetnumber', 'address_unitnumber', 'address_zipcode'], axis=1, inplace=True)) # Eliminar as colunas de contrale e id por serem inúteis ao modelo (df.drop(['createdat', 'id', 'owner', 'publisherid', 'updatedat', 'address_geolocation_precision'], axis=1, inplace=True)) # Eliminar as colunas de descrição e de links (df.drop(['description', 'images', 'title'], axis=1, inplace=True)) # Eliminar as colunas que têm valores únicos (df.drop(['listingstatus', 'unittypes'], axis=1, inplace=True)) # Eliminar as colunas categóricas (df.drop(['pricinginfos_businesstype', 'publicationtype', 'Zona'], axis=1, inplace=True)) # DF para Guardar Medidas de Posicao de São Paulo df_medidas = pd.DataFrame({'Cidade': ['São Paulo']}) # # Carregar do Daframe de Medidas de Posição # df_medidas = pickle.load(open('df_medidas.pickle', 'rb')) # # Corrigindo os Missing e Outliers # features = df.columns.tolist()[0:11] # Corrigindo Missings for i in features: coluna = i + '_mediana' mediana = df_medidas.iloc[0][coluna] df.loc[df[i].isnull(), i] = mediana # Corrigindo Outliers for i in features: coluna = i + '_p99' p99 = df_medidas.iloc[0][coluna] df.loc[df[i] > p99, i] = p99 # # Criar a coluna que mede a distância entre a geolocalização de referência (mediana da 
cidade) e o imóvel # # Funçao para calcular a distância entre dois pontos from math import radians, degrees, sin, cos, asin, acos, sqrt, atan2 def great_circle(lat1, lon1, lat2, lon2): R = 6373.0 lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2]) dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2 c = 2 * atan2(sqrt(a), sqrt(1 - a)) distance = R * c return distance lat_mediana = df_medidas.iloc[0]['address_geolocation_location_lat_mediana'] lon_mediana = df_medidas.iloc[0]['address_geolocation_location_lon_mediana'] # Criar a nova coluna que mede a distância do imóvel para a mediana def calculo_distancia (r): v_distancia = great_circle(lat_mediana, lon_mediana, r.address_geolocation_location_lat, r.address_geolocation_location_lon) return v_distancia df['Distancia'] = df.apply(calculo_distancia, axis=1) df.drop(['address_geolocation_location_lat', 'address_geolocation_location_lon'], axis=1, inplace=True) # # Precificacão # # Scaling from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # Fit the training data scaler.fit(df) # Apply the transformations to the data: X_prod = scaler.transform(df) # Linear Regression from sklearn.linear_model import LinearRegression lr = LinearRegression() modelo_lr = pickle.load(open('modelo_lr.pickle', 'rb')) predictions_lr = modelo_lr.predict(X_prod) predictions_lr # Decision Tree from sklearn.tree import DecisionTreeRegressor dt = DecisionTreeRegressor(max_depth=20, min_samples_split=50) modelo_dr = pickle.load(open('modelo_dr.pickle', 'rb')) predictions_dr = modelo_dr.predict(X_prod) predictions_dr # Random Forest from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor(n_estimators=100, max_depth=20, max_features='auto') modelo_rf = pickle.load(open('modelo_rf.pickle', 'rb')) predictions_rf = modelo_rf.predict(X_prod) predictions_rf # Boosting from sklearn import ensemble params = {'n_estimators': 200, 'max_depth': 50, 
'min_samples_split': 20, 'learning_rate': 0.01, 'loss': 'ls'} clf = ensemble.GradientBoostingRegressor(**params) modelo_clf = pickle.load(open('modelo_clf.pickle', 'rb')) predictions_clf = modelo_clf.predict(X_prod) predictions_clf # # Neural Network # from sklearn.neural_network import MLPRegressor mlp = MLPRegressor(hidden_layer_sizes=(17,17), random_state=42, max_iter=200, activation='relu') modelo_mlp = pickle.load(open('modelo_mlp.pickle', 'rb')) predictions_mlp = modelo_mlp.predict(X_prod) predictions_mlp # Rotinas para gerar o arquivo de saída CSV com os Preços de Venda estimados para cada imóvel de cada modelo utlizado # CSV de precificação baseado em Regressão Linear df_predictions_lr = pd.DataFrame(predictions_lr) df_predictions_lr.rename({0: "price"}, axis=1, inplace=True) df_concat = pd.concat ([df_predictions_lr, df_original], axis=1) df_preco_venda = df_concat[['id','price']].copy() df_preco_venda.to_csv ('predictions_lr.csv', index = False, header=True) # CSV de precificação baseado em Decision Tree df_predictions_dr = pd.DataFrame(predictions_dr) df_predictions_dr.rename({0: "price"}, axis=1, inplace=True) df_concat = pd.concat ([df_predictions_dr, df_original], axis=1) df_preco_venda = df_concat[['id','price']].copy() df_preco_venda.to_csv ('predictions_dr.csv', index = False, header=True) # CSV de precificação baseado em Random Forest df_predictions_rf = pd.DataFrame(predictions_rf) df_predictions_rf.rename({0: "price"}, axis=1, inplace=True) df_concat = pd.concat ([df_predictions_rf, df_original], axis=1) df_preco_venda = df_concat[['id','price']].copy() df_preco_venda.to_csv ('predictions_rf.csv', index = False, header=True) # CSV de precificação baseado em Boosting df_predictions_clf = pd.DataFrame(predictions_clf) df_predictions_clf.rename({0: "price"}, axis=1, inplace=True) df_concat = pd.concat ([df_predictions_clf, df_original], axis=1) df_preco_venda = df_concat[['id','price']].copy() df_preco_venda.to_csv ('predictions_clf.csv', index = 
False, header=True) # CSV de precificação baseado em Neural Network df_predictions_mlp = pd.DataFrame(predictions_mlp) df_predictions_mlp.rename({0: "price"}, axis=1, inplace=True) df_concat = pd.concat ([df_predictions_mlp, df_original], axis=1) df_preco_venda = df_concat[['id','price']].copy() df_preco_venda.to_csv ('predictions_mlp.csv', index = False, header=True) ```
github_jupyter
[Leo's Home page](https://leomrocha.github.com) -- [Github Page](https://github.com/leomrocha/minibrain/blob/master/sensors/image) -- License: [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) # Experiments with Image Convolutional Autoencoders [Leonardo M. Rocha](https://leomrocha.github.com) [Contact Me](https://leomrocha.github.io/contact/) ## Introduction This notebook presents some experimentation I did in 2018 with Convolutional Autoencoders. All the source code of the experiments (working _and_ broken) is available in the [Github project](https://github.com/leomrocha/minibrain/blob/master/sensors/image) There is no much more text in this notebook except for some words at the end, as the source code and comments should be enough to explain how and why things work. ``` import torch import torchvision from torch import nn, optim from torch.nn import functional as F from torch.autograd import Variable from torch.utils.data import DataLoader, Dataset from torchvision import transforms, utils from torchvision import datasets from torchvision.utils import save_image # import skimage import math # import io # import requests # from PIL import Image import numpy as np import pandas as pd # import matplotlib.pyplot as plt import sys import os ``` ## Bibliography: * [Stacked Convolutional Auto-Encoders for Hierarchical Feature Extraction](http://people.idsia.ch/~ciresan/data/icann2011.pdf) ### Examples: * https://github.com/pytorch/examples/blob/master/mnist/main.py * https://github.com/csgwon/pytorch-deconvnet/blob/master/models/vgg16_deconv.py ### Other resources * https://github.com/pgtgrly/Convolution-Deconvolution-Network-Pytorch/blob/master/conv_deconv.py * https://github.com/kvfrans/variational-autoencoder * https://github.com/SherlockLiao/pytorch-beginner/blob/master/08-AutoEncoder/conv_autoencoder.py * https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/convolutional_neural_network/main-gpu.py * 
https://pgaleone.eu/neural-networks/2016/11/24/convolutional-autoencoders/ ``` class CAEEncoder(nn.Module): """ The Encoder = Q(z|X) for the Network """ def __init__(self, w,h, channels=3, hid_dim=500, code_dim=200, kernel_size=3, first_feature_count=16): super(CAEEncoder, self).__init__() self.indices = [] padding = math.floor(kernel_size/2) l1_feat = first_feature_count l2_feat = l1_feat * 2 self.layer1 = nn.Sequential( nn.Conv2d(channels, l1_feat, kernel_size=kernel_size, padding=padding), # nn.BatchNorm2d(l1_feat), nn.ReLU(), # nn.Conv2d(l1_feat, l1_feat, kernel_size=kernel_size, padding=padding), # # nn.BatchNorm2d(l1_feat), # nn.ReLU(), # nn.Conv2d(l1_feat, l1_feat, kernel_size=kernel_size, padding=padding), # # nn.BatchNorm2d(l1_feat), # nn.ReLU(), nn.Conv2d(l1_feat, l1_feat, kernel_size=kernel_size, padding=padding), # nn.BatchNorm2d(l1_feat), nn.ReLU(), torch.nn.MaxPool2d(2, stride=2, return_indices=True) ) self.layer2 = nn.Sequential( nn.Conv2d(l1_feat, l2_feat, kernel_size=kernel_size, padding=padding), # nn.BatchNorm2d(l2_feat), nn.ReLU(), # nn.Conv2d(l2_feat, l2_feat, kernel_size=kernel_size, padding=padding), # # nn.BatchNorm2d(l2_feat), # nn.ReLU(), # nn.Conv2d(l2_feat, l2_feat, kernel_size=kernel_size, padding=padding), # # nn.BatchNorm2d(l2_feat), # nn.ReLU(), nn.Conv2d(l2_feat, l2_feat, kernel_size=kernel_size, padding=padding), # nn.BatchNorm2d(l2_feat), nn.ReLU(), torch.nn.MaxPool2d(2, stride=2, return_indices=True) ) self.conv_dim = int(((w*h)/16) * l2_feat) #self.conv_dim = int( channels * (w/4) * l2_feat) self.fc1 = nn.Linear(self.conv_dim, hid_dim) self.fc2 = nn.Linear(hid_dim, code_dim) # self.fc1 = nn.Linear(576, hid_dim) def get_conv_layer_indices(self): return [0, 2, 5, 7, 10] # without BatchNorm2d #return [0, 3, 7, 10, 14] # with BatchNorm2d def forward(self, x): self.indices = [] # print("encoding conv l1") out, idx = self.layer1(x) self.indices.append(idx) # print("encoding conv l2") out, idx = self.layer2(out) 
self.indices.append(idx) # print(out.size(), self.conv_dim) # print("view for FC l1") out = out.view(out.size(0), -1) # print(out.size()) # print("encoding FC1 ") out = self.fc1(out) # print("encoding FC2 ") out = self.fc2(out) return out class CAEDecoder(torch.nn.Module): """ The Decoder = P(X|z) for the Network """ def __init__(self, encoder, width, height, channels=3, hid_dim=500, code_dim=200, kernel_size=3, first_feature_count=16): super(CAEDecoder, self).__init__() padding = math.floor(kernel_size/2) # self. width = width # self.height = height # self.channels = channels self.encoder = encoder self.w_conv_dim = int(width/4) self.h_conv_dim = int(height/4) self.l1_feat = first_feature_count self.l2_feat = self.l1_feat * 2 self.conv_dim = int(((width*height)/16) * self.l2_feat) #self.conv_dim = int(channels * (width/4) * self.l2_feat) self.layer1 = torch.nn.Linear(code_dim, hid_dim) self.layer2 = torch.nn.Linear(hid_dim, self.conv_dim) self.unpool_1 = nn.MaxUnpool2d(2, stride=2) self.deconv_layer_1 = torch.nn.Sequential( nn.ConvTranspose2d(self.l2_feat, self.l2_feat, kernel_size=kernel_size, padding=padding), nn.ReLU(), # nn.ConvTranspose2d(self.l2_feat, self.l2_feat, kernel_size=kernel_size, padding=padding), # nn.ReLU(), # nn.ConvTranspose2d(self.l2_feat, self.l2_feat, kernel_size=kernel_size, padding=padding), # nn.ReLU(), nn.ConvTranspose2d(self.l2_feat, self.l1_feat, kernel_size=kernel_size, padding=padding), nn.ReLU() ) self.unpool_2 = nn.MaxUnpool2d(2, stride=2) self.deconv_layer_2 = torch.nn.Sequential( nn.ConvTranspose2d(self.l1_feat, self.l1_feat, kernel_size=kernel_size, padding=padding), nn.ReLU(), # nn.ConvTranspose2d(self.l1_feat, self.l1_feat, kernel_size=kernel_size, padding=padding), # nn.ReLU(), # nn.ConvTranspose2d(self.l1_feat, self.l1_feat, kernel_size=kernel_size, padding=padding), # nn.ReLU(), nn.ConvTranspose2d(self.l1_feat, channels, kernel_size=kernel_size, padding=padding), nn.Tanh() ) def forward(self, x): out = x # print("decoding 
l1") out = F.relu(self.layer1(x)) # print("decoding l2") out = F.relu(self.layer2(out)) # print(out.size(), self.conv_dim) # print("changing tensor shape to be an image") out = out.view(out.size(0), self.l2_feat, self.w_conv_dim, self.h_conv_dim) out = self.unpool_1(out, self.encoder.indices[-1]) # print(out.size()) # print("decoding c1") out = self.deconv_layer_1(out) # print("decoding c2") out = self.unpool_2(out, self.encoder.indices[-2]) out = self.deconv_layer_2(out) # print("returning decoder response") return out class CAE(nn.Module): def __init__(self, width, height, channels, hid_dim=500, code_dim=200, conv_layer_feat=16): super(CAE, self).__init__() self.width = width self.height = height self.channels = channels self.encoder = CAEEncoder(width, height, channels, hid_dim, code_dim, 3, conv_layer_feat) self.decoder = CAEDecoder(self.encoder, width, height, channels, hid_dim, code_dim, 3, conv_layer_feat) def forward(self, x): out = self.encoder(x) out = self.decoder(out) return out def save_model(self, name, path): torch.save(self.encoder, os.path.join(path, "cae_encoder_"+name+".pth")) torch.save(self.decoder, os.path.join(path, "cae_decoder_"+name+".pth")) #definitions of the operations for the full image autoencoder normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], # from example here https://github.com/pytorch/examples/blob/409a7262dcfa7906a92aeac25ee7d413baa88b67/imagenet/main.py#L94-L95 std=[0.229, 0.224, 0.225] # mean=[0.5, 0.5, 0.5], # from example here http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html # std=[0.5, 0.5, 0.5] ) #the whole image gets resized to a small image that can be quickly analyzed to get important points def fullimage_preprocess(w=48,h=48): return transforms.Compose([ transforms.Resize((w,h)), #this should be used ONLY if the image is bigger than this size transforms.ToTensor(), normalize ]) #the full resolution fovea just is a small 12x12 patch full_resolution_crop = transforms.Compose([ 
transforms.RandomCrop(12), transforms.ToTensor(), normalize ]) def downsampleTensor(crop_size, final_size=16): sample = transforms.Compose([ transforms.RandomCrop(crop_size), transforms.Resize(final_size), transforms.ToTensor(), normalize ]) return sample def get_loaders(batch_size, transformation, dataset = datasets.CIFAR100, cuda=True): kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {} train_loader = torch.utils.data.DataLoader( dataset('../data', train=True, download=True, transform=transformation), batch_size=batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader( dataset('../data', train=False, transform=transformation), batch_size=batch_size, shuffle=True, **kwargs) return train_loader, test_loader # Hyper Parameters # num_epochs = 5 # batch_size = 100 # learning_rate = 0.001 num_epochs = 100 batch_size = 128 learning_rate = 0.0001 model = CAE(12,12,3,500,200,32).cuda() criterion = nn.MSELoss() #criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5) model.parameters def to_img(x): x = 0.5 * (x + 1) x = x.clamp(0, 1) x = x.view(x.size(0), 3, 12, 12) return x transformation = full_resolution_crop train_loader, test_loader = get_loaders(batch_size, transformation) %%time for epoch in range(num_epochs): for i, (img, labels) in enumerate(train_loader): img = Variable(img).cuda() # ===================forward===================== # print("encoding batch of images") output = model(img) # print("computing loss") loss = criterion(output, img) # ===================backward==================== # print("Backward ") optimizer.zero_grad() loss.backward() optimizer.step() # ===================log======================== print('epoch [{}/{}], loss:{:.4f}'.format(epoch+1, num_epochs, loss.data)) if epoch % 10 == 0: pic = to_img(output.cpu().data) in_pic = to_img(img.cpu().data) save_image(pic, './cae_results/2x2-2xfc-out_image_{}.png'.format(epoch)) save_image(in_pic, 
'./cae_results/2x2-2xfc-in_image_{}.png'.format(epoch)) if loss.data < 0.15: #arbitrary number because I saw that it works well enough break model.save_model("2x2-2xfc-layer", "CAE") ``` Input and Output for the first epoch ![input](cae_results/2x2-2xfc-in_image_0.png) ![output](cae_results/2x2-2xfc-out_image_0.png) Input and Output for the 90th epoch ![input](cae_results/2x2-2xfc-in_image_90.png) ![output](cae_results/2x2-2xfc-out_image_90.png) ## Preliminary Results Experiments with the following configurations: - 2 layers with 2 convolutional stages each <- **best result** - 2 layers with 2 convolutional stages each and 2 fully connected layers <- bigger model and a bit slower to converge, but results are good too - 2 layers with 2 convolutional stages each with batch normalization - 2 layers with 4 convolutional stages each <- **worst result** 2 layers with 4 conv stages each does not give the same results as 2 layers with 2 conv stages It not only converges MUCH faster and the models are smaller, but the actually the convergence is much better For batch normalization happens the same, without batchnorm2d converges faster and model is smaller
github_jupyter
# Speech Increasingly, we expect to be able to communicate with artificial intelligence (AI) systems by talking to them, often with the expectation of a spoken response. ![A robot speaking](./images/speech.jpg) *Speech recognition* (an AI system interpreting spoken language) and *speech synthesis* (an AI system generating a spoken response) are the key components of a speech-enabled AI solution. ## Create a Cognitive Services resource To build software that can interpret audible speech and respond verbally, you can use the **Speech** cognitive service, which provides a simple way to transcribe spoken language into text and vice-versa. If you don't already have one, use the following steps to create a **Cognitive Services** resource in your Azure subscription: > **Note**: If you already have a Cognitive Services resource, just open its **Quick start** page in the Azure portal and copy its key and endpoint to the cell below. Otherwise, follow the steps below to create one. 1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account. 2. Click the **&#65291;Create a resource** button, search for *Cognitive Services*, and create a **Cognitive Services** resource with the following settings: - **Subscription**: *Your Azure subscription*. - **Resource group**: *Select or create a resource group with a unique name*. - **Region**: *Choose any available region*: - **Name**: *Enter a unique name*. - **Pricing tier**: S0 - **I confirm I have read and understood the notices**: Selected. 3. Wait for deployment to complete. Then go to your cognitive services resource, and on the **Overview** page, click the link to manage the keys for the service. You will need the key and location to connect to your cognitive services resource from client applications. ### Get the Key and Location for your Cognitive Services resource To use your cognitive services resource, client applications need its authentication key and location: 1. 
In the Azure portal, on the **Keys and Endpoint** page for your cognitive service resource, copy the **Key1** for your resource and paste it in the code below, replacing **YOUR_COG_KEY**. 2. Copy the **Location** for your resource and and paste it in the code below, replacing **YOUR_COG_LOCATION**. >**Note**: Stay on the **Keys and Endpoint** page and copy the **Location** from this page (example: _westus_). Please _do not_ add spaces between words for the Location field. 3. Run the code below by clicking the **Run cell** (&#9655;) button to the left of the cell. ``` cog_key = 'YOUR_COG_KEY' cog_location = 'YOUR_COG_LOCATION' print('Ready to use cognitive services in {} using key {}'.format(cog_location, cog_key)) ``` ## Speech recognition Suppose you want to build a home automation system that accepts spoken instructions, such as "turn the light on" or "turn the light off". Your application needs to be able to take the audio-based input (your spoken instruction), and interpret it by transcribing it to text that it can then parse and analyze. Now you're ready to transcribe some speech. The input can be from a **microphone** or an **audio file**. ### Speech Recognition with an audio file Run the cell below to see the Speech Recognition service in action with an **audio file**. 
``` import os from playsound import playsound from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, AudioConfig # Get spoken command from audio file file_name = 'light-on.wav' audio_file = os.path.join('data', 'speech', file_name) # Configure speech recognizer speech_config = SpeechConfig(cog_key, cog_location) audio_config = AudioConfig(filename=audio_file) # Use file instead of default (microphone) speech_recognizer = SpeechRecognizer(speech_config, audio_config) # Use a one-time, synchronous call to transcribe the speech speech = speech_recognizer.recognize_once() # Show transcribed text from audio file print(speech.text) ``` ## Speech synthesis So now you've seen how the Speech service can be used to transcribe speech into text; but what about the opposite? How can you convert text into speech? Well, let's assume your home automation system has interpreted a command to turn the light on. An appropriate response might be to acknowledge the command verbally (as well as actually performing the commanded task!) ``` import os import matplotlib.pyplot as plt from PIL import Image from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, AudioConfig %matplotlib inline # Get text to be spoken response_text = 'Turning the light on.' # Configure speech synthesis speech_config = SpeechConfig(cog_key, cog_location) speech_synthesizer = SpeechSynthesizer(speech_config) # Transcribe text into speech result = speech_synthesizer.speak_text(response_text) # Display an appropriate image file_name = response_text.lower() + "jpg" img = Image.open(os.path.join("data", "speech", file_name)) plt.axis('off') plt. imshow(img) ``` Try changing the **response_text** variable to *Turning the light off.* (including the period at the end) and run the cell again to hear the result. ## Learn more You've seen a very simple example of using the Speech cognitive service in this notebook. 
You can learn more about [speech-to-text](https://docs.microsoft.com/azure/cognitive-services/speech-service/index-speech-to-text) and [text-to-speech](https://docs.microsoft.com/azure/cognitive-services/speech-service/index-text-to-speech) in the Speech service documentation.
github_jupyter
# Simple Tools from Extracting Quantities from Strings Suppose we have a report and we want to find the sentences that are talking about numerical things.... *Originally inspired by [When you get data in sentences: how to use a spreadsheet to extract numbers from phrases](https://onlinejournalismblog.com/2019/07/29/when-you-get-data-in-sentences-how-to-use-a-spreadsheet-to-extract-numbers-from-phrases/), Paul Bradshaw, Online Journalism blog, from which some of the example sentences (sic!) are taken.* Distribution: https://twitter.com/paulbradshaw/status/1158752556958519297 ## Potentially Useful Python Packages - `quantulum`: extract quantities from natural language text; - `ctparse`: extract time / date related quantities from natural language text; - `r1chardj0n3s/parse`: easy scrape / regex extraction from semi-structred text using `format()` like patterns; example use [here](https://github.com/psychemedia/parlihacks/blob/master/notebooks/MP%20Register%20of%20Interests.ipynb); - [`dateparser`](https://github.com/scrapinghub/dateparser) [[docs](https://dateparser.readthedocs.io/en/latest/)]: "easily parse localized dates in almost any string formats commonly found on web pages" (includes foreign language detection); - [invoice2data](https://github.com/invoice-x/invoice2data): ## Example Sentences Make a start on some sample test sentences... ``` sentences = [ '4 years and 6 months’ imprisonment with a licence extension of 2 years and 6 months', 'No quantities here', 'I measured it as 2 meters and 30 centimeters.', "four years and six months' imprisonment with a licence extension of 2 years and 6 months", 'it cost £250... bargain...', 'it weighs four hundred kilograms.', 'It weighs 400kg.', 'three million, two hundred & forty, you say?', 'it weighs four hundred and twenty kilograms.' ] ``` ## `quantulum3` [`quantulum3`](https://github.com/nielstron/quantulum3) is a Python package *"for information extraction of quantities from unstructured text"*. 
``` #!pip3 install quantulum3 from quantulum3 import parser for sent in sentences: print(sent) p = parser.parse(sent) if p: print('\tSpoken:',parser.inline_parse_and_expand(sent)) print('\tNumeric elements:') for q in p: display(q) print('\t\t{} :: {}'.format(q.surface, q)) print('\n---------\n') ``` ## Finding quantity statements in large texts If we have a large block of text, we might want to quickly skim it for quantity containing sentences, we can do something like the following... ``` import spacy nlp = spacy.load('en_core_web_lg', disable = ['ner']) text = ''' Once upon a time, there was a thing. The thing weighed forty kilogrammes and cost £250. It was blue. It took forty five minutes to get it home. What a day that was. I didn't get back until 2.15pm. Then I had some cake for tea. ''' doc = nlp(text) for sent in doc.sents: print(sent) for sent in doc.sents: sent = sent.text p = parser.parse(sent) if p: print('\tSpoken:',parser.inline_parse_and_expand(sent)) print('\tNumeric elements:') for q in p: display(q) print('\t\t{} :: {}'.format(q.surface, q)) print('\n---------\n') ``` ## Annotating a dataset Can we extract numbers from sentences in a CSV file? Yes we can... ``` url = 'https://raw.githubusercontent.com/BBC-Data-Unit/unduly-lenient-sentences/master/ULS%20for%20Sankey.csv' import pandas as pd df = pd.read_csv(url) df.head() #get a row df.iloc[1] #and a, erm. sentence... df.iloc[1]['Original sentence (refined)'] parser.parse(df.iloc[1]['Original sentence (refined)']) def amountify(txt): #txt may be some flavout of nan... #handle scruffily for now... try: if txt: p = parser.parse(txt) x=[] for q in p: x.append( '{} {}'.format(q.value, q.unit.name)) return '::'.join(x) return '' except: return df['amounts'] = df['Original sentence (refined)'].apply(amountify) df.head() ``` We could then do something to split multiple amounts into multiple rows or columns... 
### Parsing Semi-Structured Sentences The sentencing sentences look to have a reasonable degree of structure to them (or at least, there are some commenalities in the way some of them are structured). We can exploit this structure by writing some more specific pattern matches to pull out even more information. ``` df['Original sentence (refined)'][:20].apply(print); ``` It makes sense to try to build a default hierarchy that extracts from more specific to less specific structures... For example: - *9 months imprisonment suspended for 2 years* is more specific than *9 months imprisonment*
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Inference PyTorch Bert Model with ONNX Runtime on CPU In this tutorial, you'll be introduced to how to load a Bert model from PyTorch, convert it to ONNX, and inference it for high performance using ONNX Runtime. In the following sections, we are going to use the Bert model trained with Stanford Question Answering Dataset (SQuAD) dataset as an example. Bert SQuAD model is used in question answering scenarios, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. This notebook is for CPU inference. For GPU inferenece, please look at another notebook [Inference PyTorch Bert Model with ONNX Runtime on GPU](PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb). ## 0. Prerequisites ## If you have Jupyter Notebook, you may directly run this notebook. We will use pip to install or upgrade [PyTorch](https://pytorch.org/), [OnnxRuntime](https://microsoft.github.io/onnxruntime/) and other required packages. Otherwise, you can setup a new environment. First, we install [AnaConda](https://www.anaconda.com/distribution/). Then open an AnaConda prompt window and run the following commands: ```console conda create -n cpu_env python=3.6 conda activate cpu_env conda install jupyter jupyter notebook ``` The last command will launch Jupyter Notebook and we can open this notebook in browser to continue. ``` import sys run_install = False # Only need install once if run_install: if sys.platform in ['linux', 'win32']: # Linux or Windows !{sys.executable} -m pip install --upgrade torch torchvision torchaudio else: # Mac !{sys.executable} -m pip install torch==1.9.0+cpu torchvision==0.10.0+cpu torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html !{sys.executable} -m pip install onnxruntime==1.8.1 onnx==1.9.0 onnxconverter_common==1.8.1 # Install other packages used in this notebook. 
!{sys.executable} -m pip install transformers==4.8.2 !{sys.executable} -m pip install psutil pytz pandas py-cpuinfo py3nvml !{sys.executable} -m pip install wget netron ``` ## 1. Load Pretrained Bert model ## We begin by downloading the SQuAD data file and store them in the specified location. ``` import os cache_dir = os.path.join("..", "cache_models") if not os.path.exists(cache_dir): os.makedirs(cache_dir) predict_file_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json" predict_file = os.path.join(cache_dir, "dev-v1.1.json") if not os.path.exists(predict_file): import wget print("Start downloading predict file.") wget.download(predict_file_url, predict_file) print("Predict file downloaded.") ``` Specify some model configuration variables and constant. ``` # For fine tuned large model, the model name is "bert-large-uncased-whole-word-masking-finetuned-squad". Here we use bert-base for demo. model_name_or_path = "bert-base-cased" max_seq_length = 128 doc_stride = 128 max_query_length = 64 # Enable overwrite to export onnx model and download latest script each time when running this notebook. enable_overwrite = True # Total samples to inference. It shall be large enough to get stable latency measurement. total_samples = 100 ``` Start to load model from pretrained. This step could take a few minutes. 
``` # The following code is adapted from HuggingFace transformers # https://github.com/huggingface/transformers/blob/master/examples/run_squad.py from transformers import (BertConfig, BertForQuestionAnswering, BertTokenizer) # Load pretrained model and tokenizer config_class, model_class, tokenizer_class = (BertConfig, BertForQuestionAnswering, BertTokenizer) config = config_class.from_pretrained(model_name_or_path, cache_dir=cache_dir) tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case=True, cache_dir=cache_dir) model = model_class.from_pretrained(model_name_or_path, from_tf=False, config=config, cache_dir=cache_dir) # load some examples from transformers.data.processors.squad import SquadV1Processor processor = SquadV1Processor() examples = processor.get_dev_examples(None, filename=predict_file) from transformers import squad_convert_examples_to_features features, dataset = squad_convert_examples_to_features( examples=examples[:total_samples], # convert just enough examples for this notebook tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=False, return_dataset='pt' ) ``` ## 2. Export the loaded model ## Once the model is loaded, we can export the loaded PyTorch model to ONNX. ``` output_dir = os.path.join("..", "onnx_models") if not os.path.exists(output_dir): os.makedirs(output_dir) export_model_path = os.path.join(output_dir, 'bert-base-cased-squad.onnx') import torch device = torch.device("cpu") # Get the first example data to run the model and export it to ONNX data = dataset[0] inputs = { 'input_ids': data[0].to(device).reshape(1, max_seq_length), 'attention_mask': data[1].to(device).reshape(1, max_seq_length), 'token_type_ids': data[2].to(device).reshape(1, max_seq_length) } # Set model to inference mode, which is required before exporting the model because some operators behave differently in # inference and training mode. 
model.eval() model.to(device) if enable_overwrite or not os.path.exists(export_model_path): with torch.no_grad(): symbolic_names = {0: 'batch_size', 1: 'max_seq_len'} torch.onnx.export(model, # model being run args=tuple(inputs.values()), # model input (or a tuple for multiple inputs) f=export_model_path, # where to save the model (can be a file or file-like object) opset_version=11, # the ONNX version to export the model to do_constant_folding=True, # whether to execute constant folding for optimization input_names=['input_ids', # the model's input names 'input_mask', 'segment_ids'], output_names=['start', 'end'], # the model's output names dynamic_axes={'input_ids': symbolic_names, # variable length axes 'input_mask' : symbolic_names, 'segment_ids' : symbolic_names, 'start' : symbolic_names, 'end' : symbolic_names}) print("Model exported at ", export_model_path) ``` ## 3. PyTorch Inference ## Use PyTorch to evaluate an example input for comparison purpose. ``` import time # Measure the latency. It is not accurate using Jupyter Notebook, it is recommended to use standalone python script. latency = [] with torch.no_grad(): for i in range(total_samples): data = dataset[i] inputs = { 'input_ids': data[0].to(device).reshape(1, max_seq_length), 'attention_mask': data[1].to(device).reshape(1, max_seq_length), 'token_type_ids': data[2].to(device).reshape(1, max_seq_length) } start = time.time() outputs = model(**inputs) latency.append(time.time() - start) print("PyTorch {} Inference time = {} ms".format(device.type, format(sum(latency) * 1000 / len(latency), '.2f'))) ``` ## 4. Inference ONNX Model with ONNX Runtime ## For Onnx Runtime 1.6.0 or older, OpenMP environment variables are very important for CPU inference of Bert model. Since 1.7.0, the official package is not built with OpenMP. Now we inference the model with ONNX Runtime. Here we can see that OnnxRuntime has better performance than PyTorch. 
``` import onnxruntime import numpy sess_options = onnxruntime.SessionOptions() # Optional: store the optimized graph and view it using Netron to verify that model is fully optimized. # Note that this will increase session creation time, so it is for debugging only. sess_options.optimized_model_filepath = os.path.join(output_dir, "optimized_model_cpu.onnx") # For OnnxRuntime 1.7.0 or later, you can set intra_op_num_threads to set thread number like # sess_options.intra_op_num_threads=4 # Here we use the default value which is a good choice in most cases. # Specify providers when you use onnxruntime-gpu for CPU inference. session = onnxruntime.InferenceSession(export_model_path, sess_options, providers=['CPUExecutionProvider']) latency = [] for i in range(total_samples): data = dataset[i] ort_inputs = { 'input_ids': data[0].cpu().reshape(1, max_seq_length).numpy(), 'input_mask': data[1].cpu().reshape(1, max_seq_length).numpy(), 'segment_ids': data[2].cpu().reshape(1, max_seq_length).numpy() } start = time.time() ort_outputs = session.run(None, ort_inputs) latency.append(time.time() - start) print("OnnxRuntime cpu Inference time = {} ms".format(format(sum(latency) * 1000 / len(latency), '.2f'))) print("***** Verifying correctness *****") for i in range(2): print('PyTorch and ONNX Runtime output {} are close:'.format(i), numpy.allclose(ort_outputs[i], outputs[i].cpu(), rtol=1e-05, atol=1e-04)) ``` ## 5. Offline Optimization Script and Test Tools It is recommended to try [OnnxRuntime Transformer Model Optimization Tool](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers) on the exported ONNX models. It could help verify whether the model can be fully optimized, and get performance test results. #### Transformer Optimizer Although OnnxRuntime could optimize Bert model exported by PyTorch. 
Sometime, model cannot be fully optimized due to different reasons: * A new subgraph pattern is generated by new version of export tool, and the pattern is not covered by older version of OnnxRuntime. * The exported model uses dynamic axis and this makes it harder for shape inference of the graph. That blocks some optimization to be applied. * Some optimization is better to be done offline. Like change input tensor type from int64 to int32 to avoid extra Cast nodes, or convert model to float16 to achieve better performance in V100 or T4 GPU. We have python script **optimizer.py**, which is more flexible in graph pattern matching and model conversion (like float32 to float16). You can also use it to verify whether a Bert model is fully optimized. In this example, we can see that it introduces optimization that is not provided by onnxruntime: SkipLayerNormalization and bias fusion, which is not fused in OnnxRuntime due to shape inference as mentioned. It will also tell whether the model is fully optimized or not. If not, that means you might need change the script to fuse some new pattern of subgraph. Example Usage: ``` from onnxruntime.transformers import optimizer optimized_model = optimizer.optimize_model(export_model_path, model_type='bert', num_heads=12, hidden_size=768) optimized_model.save_model_to_file(optimized_model_path) ``` You can also use command line like the following: ``` optimized_model_path = os.path.join(output_dir, 'bert-base-cased-squad_opt_cpu.onnx') !{sys.executable} -m onnxruntime.transformers.optimizer --input $export_model_path --output $optimized_model_path --model_type bert --num_heads 12 --hidden_size 768 ``` #### Optimized Graph When you can open the optimized model using Netron to visualize, the graph is like the following: <img src='images/optimized_bert_gpu.png'> For CPU, optimized graph is slightly different: FastGelu is replaced by BiasGelu. ``` import netron # Change it to False to skip viewing the optimized model in browser. 
enable_netron = True if enable_netron: # If you encounter error "access a socket in a way forbidden by its access permissions", install Netron as standalone application instead. netron.start(optimized_model_path) ``` #### Model Results Comparison Tool If your BERT model has three inputs, a script compare_bert_results.py can be used to do a quick verification. The tool will generate some fake input data, and compare results from both the original and optimized models. If outputs are all close, it is safe to use the optimized model. Example of verifying models: ``` !{sys.executable} -m onnxruntime.transformers.compare_bert_results --baseline_model $export_model_path --optimized_model $optimized_model_path --batch_size 1 --sequence_length 128 --samples 100 ``` #### Performance Test Tool This tool measures performance of BERT model inference using OnnxRuntime Python API. The following command will create 100 samples of batch_size 1 and sequence length 128 to run inference, then calculate performance numbers like average latency and throughput etc. You can increase number of samples (recommended 1000) to get more stable result. ``` !{sys.executable} -m onnxruntime.transformers.bert_perf_test --model $optimized_model_path --batch_size 1 --sequence_length 128 --samples 100 --test_times 1 ``` Let's load the summary file and take a look. ``` import os import glob import pandas latest_result_file = max(glob.glob(os.path.join(output_dir, "perf_results_*.txt")), key=os.path.getmtime) result_data = pandas.read_table(latest_result_file, converters={'OMP_NUM_THREADS': str, 'OMP_WAIT_POLICY':str}) print(latest_result_file) # Remove some columns that have same values for all rows. columns_to_remove = ['model', 'graph_optimization_level', 'batch_size', 'sequence_length', 'test_cases', 'test_times', 'use_gpu'] # Hide some latency percentile columns to fit screen width. 
columns_to_remove.extend(['Latency_P50', 'Latency_P95']) result_data.drop(columns_to_remove, axis=1, inplace=True) result_data ``` ## 6. Additional Info Note that running Jupyter Notebook has slight impact on performance result since Jupyter Notebook is using system resources like CPU and memory etc. It is recommended to close Jupyter Notebook and other applications, then run the performance test tool in a console to get more accurate performance numbers. We have a [benchmark script](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/run_benchmark.sh). It is recommended to use it compare inference speed of OnnxRuntime with PyTorch. [OnnxRuntime C API](https://github.com/microsoft/onnxruntime/blob/master/docs/C_API.md) could get slightly better performance than python API. If you use C API in inference, you can use OnnxRuntime_Perf_Test.exe built from source to measure performance instead. Here is the machine configuration that generated the above results. The machine has GPU but not used in CPU inference. You might get slower or faster result based on your hardware. ``` !{sys.executable} -m onnxruntime.transformers.machine_info --silent ```
github_jupyter
充分利用python的特点和功能, 可以让我们的代码更加的简洁, 有更好的可读性, 很多情况下也会带来更好的性能。 符合这样要求的代码也被python社区称为pythonic的代码。 这里举例一些常见的使用python时遇到的场景。 针对这些场景, 给出两个版本的代码, 它们都实现了同样的功能, 但是第一个版本没有充分利用python特性,另一个版本则比较pythonic。 pythonic的代码除了简介优雅,大部分时候,也会有更好的性能(因为它们经过特别优化)。为了验证这点,我们可以利用helper.py中的func_compare函数比较两个版本的代码的性能。 ``` # 这段代码定义了一个函数, 可以将相同的参数传给两个版本的函数后比较运行时间, 后面的代码实验中会用到, 默认每个函数重复100000次。 from helper import func_compare ``` ## tuple/list unpacking ### 批量赋值给多个变量 从一个tuple/list中将数据取出, 赋值给几个不同的变量是一个常见的需求。有时候这个tuple/list中还会嵌套更多的tuple/list。 下面的例子里, 我们有这样一个tuple ``` d = ('male',('Junjie', 'Cai'), 'cjj@jfpal.com', 'data engineer') ``` 现在希望将里面的性别, 姓, 名, 邮箱几个数据取出并赋值给相应的变量后, 然后用这些变量组成一个字符串返回。 版本1的代码用index一个个的取出数值 ``` def func_ver_1(d): sex = d[0] email = d[2] name = d[1] family_name = name[1] first_name = name[0] title = d[3] return "{family_name} {first_name} is a {sex} {title} with email:{email}".format(**locals()) ``` 版本2的代码则利用tuple unpacking一次性的取出所有数值。 ``` def func_ver_2(d): sex, (first_name, family_name), email, title = d return "{family_name} {first_name} is a {sex} {title} with email:{email}".format(**locals()) d = ('male',('Junjie', 'Cai'), 'cjj@jfpal.com', 'data engineer') func_compare(func_ver_1, func_ver_2, [d]) ``` ### 在循环中利用 上面的技巧可以放在for循环中使用。看下面的例子。 ``` d_list = [ ('male',('Junjie', 'Cai'), 'cjj@jfpal.com', 'data engineer'), ('male',('Hongfei', 'Bao'), 'bhf@jfpal.com', 'data engineer') ] def func_ver_1(d_list): str_list = [] for d in d_list: sex = d[0] email = d[2] name = d[1] family_name = name[1] first_name = name[0] title = d[3] str_list.append("{family_name} {first_name} is a {sex} {title} with email:{email}".format(**locals())) return str_list def func_ver_2(d): str_list = [] for sex, (first_name, family_name), email, title in d_list: str_list.append("{family_name} {first_name} is a {sex} {title} with email:{email}".format(**locals())) return str_list func_compare(func_ver_1, func_ver_2, [d_list]) ``` ### 选取开头和结尾部分的变量 假设这里只需要取出第一个和最后一个变量。 tuple/list unpacking是不能只选择头尾部分的数据的, 
但是如果对tuple/list中间的部分不感兴趣, 可以用 **```*_```** 统一接收, 这样就会将中间段的数据放入变量名为**_**的list中, 后面代码不使用即可。 ``` def func_ver_1(d): sex = d[0] email = d[2] return "Owner of {email} is {sex}".format(**locals()) def func_ver_2(d): sex, *_, email = d #注意这里 return "Owner of {email} is {sex}".format(**locals()) d = ('male',('Junjie', 'Cai'), 'cjj@jfpal.com') func_compare(func_ver_1, func_ver_2, [d]) ``` 注意这里使用**_**作为变量名, 只是一种约定, 表示unpacking后,这个变量存的信息代码的作者并不关心。 不过实际上使用任何合法的变量名都是可以的。 ### 数值交换 有a, b两个变量 ``` a = 1 b = 10 ``` 我们需要交换a, b两个变量的数值。这时也可以利用tuple unpacking的技巧。 ``` def func_ver_1(a, b): temp = b b = a a = temp return (a, b) def func_ver_2(a, b): a, b = b, a # 注意这里 return (a, b) func_compare(func_ver_1, func_ver_2, [a,b]) ``` 注意虽然没有添加(),但是python也是创建了一个tuple哦。 ``` x = 1,2 print(type(x)) ``` 这种场景很容易推广到多个变量数值进行交换的情况 ``` a = 1 b = 2 c = 3 b, c, a = a, b, c print(a, b, c) ``` ## 字符串连接 一个很常见的任务就是把list中的字符串用某个特定的字符进行拼接, 例如我们有这样一个list ``` s_list = ['To', 'be', 'or', 'not', 'to', 'be'] ``` 希望把他们用空格去连接几个单词组成一个句子 ``` def func_1(s_list, sep): string = '' for i, s in enumerate(s_list): if i!=0: string += sep+s else: string += s return string def func_2(s_list,sep): return sep.join(s_list) sep = ' ' func_compare(func_1, func_2, [s_list, sep]) ``` 第二个版本中, 用python内置的函数比自己去实现, 简洁度, 效率都高了不是一点点。 ## 二. 
创建同值的list 某时候我们需要快速的创建一个list, 里面的值是相同的。 例如, 我们创建一个长度为4的list, 其中每个位置的内容都是[1,2,1,2,1,2,1,2] ``` def func_1(n): l = [] for i in range(n): l.append(1) l.append(2) return l def func_2(n): return [1, 2]*n func_compare(func_1, func_2, [4]) ``` python提供的**```*```**运算符可以将list复制扩增。 不过这里要注意的是, 用这种方法创建的list中存放的数据如果是object, 它们指向的都是同一个object。 用下面的代码可以验证这点 ``` a = [ [0,0,0,0] ] * 4 print(a) print(a[0] is a[1]) a[0][0] = 10 print(a) ``` 对于非mutable object没有这个问题 ``` l = [1] * 4 l[0] = 10 l ``` ## for循环是否全通过 有时候我们要判断一个for循环是否中途被break掉还是“寿终正寝”。 例如下面判断list_a中每一个元素是否都属于list_b(这里只是为了演示for...else...,完成这个需求更好的办法是用set) ``` list_a = ['java','spark','python'] list_b = ['java','html5','excel','spark','python'] def func_1(list_a, list_b): is_all_in = True for x in list_a: if not x in list_b: is_all_in = False break if is_all_in: return 'all item in list_a is in list_b' else: return 'not all item in list_a is in list_b' def func_2(available, demand): for x in list_a: if not x in list_b: return 'not all item in list_a is in list_b' else: # if all passed return 'all item in list_a is in list_b' func_compare(func_1, func_2, [list_a, list_b]) ``` for ...else...中的else刚接触会觉得有点别扭, 总之记住如果for循环部分执行了break, 或者出现Exception, else分支就不会被执行即可。 利用这个语句可以省掉一个flag变量标记for循环是否中途被break掉。 ## 10>c>b>a>0? 如何判断10>c>b>a>0? 
``` def func_ver_1(a,b,c): return (10>c) and (c>b) and (b>a) and (a>2) def func_ver_2(a, b, c): return 10>c>b>a>2 #注1 a = 1 b = 3 c = 5 func_compare(func_ver_1, func_ver_2, [a, b, c]) ``` 几乎每个学C语言的同学都课上都会被强调不能用第二个版本的中的写法,否则无法得到正确的结果。不过这种写法在python确是完全可以的哦。 C语言中下面的例子得到的结果是False ``` c = 5 10 > 5 > 2 ``` ## 判断空容器和空值 ### 判断容器为空 下面的例子里, 我们检查一个list是否为空, 如果是, 返回True, 否则返回False ``` def func_ver_1(l): if len(l)>0: return True else: return False def func_ver_2(l): if l: # 不需要用len return True else: return False l = [] func_compare(func_ver_1, func_ver_2, [[]]) ``` 如果把list直接用在if语句中, 那么如果它们是空的, 对于if相当于False, 否则为True。 因此如果目的是判断它们是否为空, 就不用额外的使用len求长度了。 这点同样适用于tuple, set, dict, string, 见下面代码 ``` if (): pass else: print('tuple is empty') if set(): pass else: print('set is empty') if {}: pass else: print('dict is empty') if "": pass else: print('string is empty') ``` 不过很遗憾, 对于数据分析常用的pandas.DataFrame和pandas.Series, 并不支持这个特征, 直接放在if里会报错。 ``` try: from pandas import DataFrame if DataFrame(): pass else: print('DataFrame is empty') except Exception as e: print(type(e),e) class MyDataFrame(DataFrame): def __bool__(self): if len(self) > 0: return True else: return False if MyDataFrame(): pass else: print('DataFrame is empty') ``` ### 判断是否是None 这里假设数据是None返回False, 否则返回Yes ``` def func_ver_1(l): if x is None: return True else: return False def func_ver_2(l): if x: # 注意这里 return False else: return True x = None func_compare(func_ver_1, func_ver_2, [[]]) ``` 如果将None直接用在if中, 相当于False, 因此可以利用这点很方便的判断一个数据是否为None ## list/generator/dict/set comprehension 一个很常见的场景是, 把一个容器中的元素进行遍历,取出符合条件的值, 进行变换处理,然后存放在另一个容器中。 例如下面把list中的偶数取出, 求平方后存在另一个list。 ``` l = [1,2,3,4,5,6,7,8,9,10] def func_ver_1(l): new_list = [] for v in l: if v%2==0: new_list.append(v**2) return new_list def func_ver_2(l): return [v**2 for v in l if v%2==0] #注意 func_compare(func_ver_1, func_ver_2, [l]) ``` 版本2的写法叫做list comprehension功能,它有几个组成要素。换行后更容易观察。 ``` [ #定义新容器的类型,这里是list v**2 #需要进行的变换方式 for v in l # 对原容器进行遍历 if v%2==0 #筛选条件, 这部分是可选的 ] 
``` 对应的也有dict comprehension, 例如下面的例子中。 ``` d = {1:'apple', 2:'peach', 3:'melon', 4:'banana'} ``` 我们把key为偶数的item取出后, 将value变成大写, 然后将这个key-value pair存入新的dict ``` def func_ver_1(d): new_dict = {} for k in d: if k%2==0: new_dict[k] = d[k].upper() return new_dict def func_ver_2(d): return { k:v.upper() for k,v in d.items() if k%2 == 0 } #注1 d = {1:'apple', 2:'peach', 3:'melon', 4:'banana'} func_compare(func_ver_1, func_ver_2, [d]) ``` 换行后更好的观察各个要素 ``` new_d = { #定义新容器的类型,这里是dict k:v.upper() #新的key-value pair,可以分别进行变换 for k,v in d.items() # 对原来的dict进行遍历 if k%2==0 #筛选条件,k是偶数 } print(new_d) ``` 同样,有set comprehension。 例如将下面list中的偶数取出,放入一个set ``` l = [1,2,3,4,5,6,7,8,9,10] s = {v for v in l if v%2==0} print(s) ``` 有一点要注意的是,如果使用(),得到的并不是一个tuple,而是generator ``` l = [1,2,3,4,5,6,7,8,9,10] g = (v for v in l if v%2==0) print(g) print(list(g)) ``` ## 两个list创建一个dict 这也是一个非常常见的场景。一个list提供key, 另一个list提供value ``` def func_1(l_key, l_value): d = {} for i, v in enumerate(l_key): d[v] = l_value[i] return d def func_2(l_key, l_value): return dict(zip(l_key, l_value)) #注1 l_key = [1,2,3,4,5] l_value = ['a','b','c','d','e'] func_compare(func_1, func_2, [l_key, l_value]) list(zip(l_key, l_value)) ``` 这里利用zip函数将两个list合并成一个generator, generator会按顺序输出一系列tuple, tuple中的数据分别来自两个list. 
见下面代码。 ``` list_a = ['a','b','c'] list_b = [1,2,3] z = zip(list_a, list_b) print(next(z)) print(next(z)) print(next(z)) ``` dict()函数可以接受输出一列二元tuple的generator并构造dict,这种写法非常简洁。 ## 确保资源被正确关闭 下面的场景里, 我们要确保打开了一个文件后, 要确保它们被关闭 ``` def func_ver_1(file_name): try: f = open(file_name, 'w') except Exception as e: pass finally: f.close() def func_ver_2(file_name): with open(file_name, 'w') as f: #注1 pass file_name = 'test_temp_file.txt' func_compare(func_ver_1, func_ver_2, [file_name],n=10000) ``` with语法在python中叫做context manager, 在with语句去打开文件, 将对这个文件所需的处理语句放在with中, 这样无论这些语句是否raise Exception, 都能保证文件都能被关闭。 可以省掉一大块try, except, finnaly组合, 让代码简洁不少。 其他一些连接数据资源, 比如连接数据库的函数, 一般也会支持这种用法。 ## 判断数据的类型 下面我们判断a是否是tuple或list, 是的话返回True, 否则False ``` def func_ver_1(a): if (str(type(a)) == "<class 'tuple'>") or (str(type(a)) == "<class 'list'>"): return True else: return False def func_ver_2(a): if isinstance(a, (tuple, list)): #注1 return True else: return False func_compare(func_ver_1, func_ver_2, [a]) ``` 判断是否函数可以用```callable()``` ``` def a(): pass callable(a) ``` 注释 1. 由于很多python教材都会在早期的时候引入对type函数的介绍, 因此初学者可能会很长时间内使用type去判断数据的类型。 不过isinstance是更好的选择。 # 跨行的字符串连接 ``` my_very_big_string = ( "For a long time I used to go to bed early. Sometimes, " "when I had put out my candle, my eyes would close so quickly " "that I had not even time to say “I’m going to sleep.”" ) my_very_big_string ``` 不过这个特性也很容易成为程序中的bug ``` l = [ 'x' # 创建list的时候,有时候会遗漏逗号。这时候python是不会报错的 'y', 'z' ] l ```
github_jupyter
# **DIVE INTO CODE COURSE** ## **Graduation Assignment** **Student Name**: Doan Anh Tien<br> **Student ID**: 1852789<br> **Email**: tien.doan.g0pr0@hcmut.edu.vn ## Introduction The graduation assignment was based on one of the challenges from the Vietnamese competition **Zalo AI Challenge**. The description of the challenge is described as follows: > During the Covid-19 outbreak, the Vietnamese government pushed the "5K" public health safety message. In the message, masking and keeping a safe distance are two key rules that have been shown to be extremely successful in preventing people from contracting or spreading the virus. Enforcing these principles on a large scale is where technology may help. In this challenge, you will create algorithm to detect whether or not a person or group of individuals in a picture adhere to the "mask" and "distance" standards. **Basic rules** We are given the dataset contains images of people either wearing mask or not and they are standing either close of far from each other. Our mission is to predict whether the formation of these people adhere the 5k standard. The 5k standard is also based on the two conditions, mask (0 == not wearing, 1 == wearing) and distancing (0 == too close, 1 == far enough). People that adhere the 5k standard will not likely to expose the virus to each other in case they did caught it before, and it is to prevent the spread of the COVID-19 pandamic through people interactions. 
--- ``` !pip install wandb import os import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm import tensorflow as tf import wandb from wandb.keras import WandbCallback from tensorflow.data import AUTOTUNE from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, RandomZoom, RandomFlip, InputLayer from tensorflow.keras.layers.experimental.preprocessing import RandomWidth from tensorflow.keras.losses import sparse_categorical_crossentropy from tensorflow.keras.optimizers import Adam, SGD from sklearn.model_selection import KFold, StratifiedKFold from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, f1_score import tensorflow_hub as hub from PIL import Image tf.config.run_functions_eagerly(True) tf.data.experimental.enable_debug_mode() print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) print('Tensorflow version:', tf.__version__) print('Keras version:', keras.__version__) !nvidia-smi # For GPU runtime ``` ## **Recreate the pipeline** Since making the process of detecting mask and distancing to be seperated procedures, evaluate new models or changing hyperparameters would be exhausted. In this section, I manage to create the pipeline that can be run once to train, predict and monitor the metrics. But before heading to that part, we can re-examine our problem to find a better way for a better results. One problem still remains is that the dataset contain so many missing values, and it is in fact can affect our model predictions, hence getting less accuracy. 
Missing values ``` # data_path = '/content/drive/MyDrive/Colab Notebooks/DIVE INTO CODE/Graduation/data' data_path = "/kaggle/input/zalo-ai-challenge-2021-5k-compliance/5k-compliance/train/" model_path = "/kaggle/working" img_dir = os.path.join(data_path, 'images') meta = pd.read_csv(data_path + 'train_meta.csv') plt.figure(figsize=(10,6)) sns.heatmap(meta.isnull(), cbar=False) print('Num. missing mask',\ len(meta[meta['mask'].isna()])) print('Num. missing distancing',\ len(meta[meta['distancing'].isna()])) print('Num. missing 5k',\ len(meta[meta['5k'].isna()])) print('Num. missing mask and distancing:',\ len(meta[(meta['mask'].isna()) & (meta['distancing'].isna())])) print('Num. missing mask and 5k:',\ len(meta[(meta['mask'].isna()) & (meta['5k'].isna())])) print('Num. missing distancing and 5k:',\ len(meta[(meta['distancing'].isna()) & (meta['5k'].isna())])) print('Num. missing all three attributes:',\ len(meta[(meta['mask'].isna()) & (meta['distancing'].isna()) & (meta['5k'].isna())])) ``` Apparently, the missing values are occurs as either missing one of three attribute, or a pair of attributes respectively (except for mask and distancing). None of row missing all three attributes. To get the 5k value, we should have know the mask and distancing value first. Luckily, none of row miss these two variables. Therefore, we can fill the missing values with our own logics (not all the cases). The original rule for 5k evaluation can be described as follow: ``` 5k = 1 if (mask == 1 and distancing == 1) else 0 ``` Base on this, we can design a pipeline that can fill out the missing values and produce better results: > 1. 
Model mask detection -> Use to predict the missing mask values -> From there continue to fill the missing distancing values ``` if (mask == 1) and (5k == 1): distancing = 1 elif (mask == 1) and (5k == 0): distancing = 0 elif (mask == 0) and (5k == 0) distancing = 0 ``` In case the mask is 0, we can skip it since `mask == 0 and 5k == 0` is the only case we can intepret with and in that case, I have run the code: `meta[(meta['mask'] == 0) & (meta['5k'] == 0) & (meta['distancing'].isna())]` and it return nothing. So it is safe to assume this part does not miss any values and is skippable. > 2. Model distancing -> Use to predict the missing 5k values ``` if (distancing == 1) and (mask == 1) 5k == 1 elif (distancing == 0) or (mask == 0) 5k == 0 ``` > 3. Model 5k -> Use to predict the final output 5k In conclusion, the difference between the previous section and this section is that we will make three models instead of two. This is doable as we are going to fill the missing 5k values, thus we can use this attribute for our final prediction. 
#### Define resources ``` !wandb login 88c91a7dc6dd5574f423e38f852c6fe640a7fcd0 wandb.init(project="diveintocode-grad-2nd-approach", entity="atien228") ``` #### Hyparameters ``` standard = 'mask' #@param ['mask', 'distancing'] SEED = 42 #@param {type:'integer'} wandb.config = { "learning_rate": 0.001, "epochs": 13, "batch_size": 20, "momentum": 0.5, "smoothing": 0.1 } # Model configuration os.environ["TFHUB_CACHE_DIR"] = model_path + "/tmp/model" model_name = "efficientnetv2-b3-21k-ft1k" # @param ['efficientnetv2-s', 'efficientnetv2-m', 'efficientnetv2-l', 'efficientnetv2-s-21k', 'efficientnetv2-m-21k', 'efficientnetv2-l-21k', 'efficientnetv2-xl-21k', 'efficientnetv2-b0-21k', 'efficientnetv2-b1-21k', 'efficientnetv2-b2-21k', 'efficientnetv2-b3-21k', 'efficientnetv2-s-21k-ft1k', 'efficientnetv2-m-21k-ft1k', 'efficientnetv2-l-21k-ft1k', 'efficientnetv2-xl-21k-ft1k', 'efficientnetv2-b0-21k-ft1k', 'efficientnetv2-b1-21k-ft1k', 'efficientnetv2-b2-21k-ft1k', 'efficientnetv2-b3-21k-ft1k', 'efficientnetv2-b0', 'efficientnetv2-b1', 'efficientnetv2-b2', 'efficientnetv2-b3', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'bit_s-r50x1', 'inception_v3', 'inception_resnet_v2', 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'nasnet_large', 'nasnet_mobile', 'pnasnet_large', 'mobilenet_v2_100_224', 'mobilenet_v2_130_224', 'mobilenet_v2_140_224', 'mobilenet_v3_small_100_224', 'mobilenet_v3_small_075_224', 'mobilenet_v3_large_100_224', 'mobilenet_v3_large_075_224'] trainable = True loss_function = sparse_categorical_crossentropy optimizer = SGD(learning_rate=wandb.config['learning_rate'], momentum=wandb.config['momentum'] ) verbose = 1 num_folds = 2 # Testing ``` #### FiveKCompliance class ``` class FiveKCompliance(): def __init__(self, model_name, seed, config, data_path): self.seed = seed self.lr = config['learning_rate'] 
self.epochs = config['epochs'] self.batch_size = config['batch_size'] self.momentum = config['momentum'] self.smoothing = config['smoothing'] self.n_channels = 3 self.buffer_size = 1024 self.data_path = data_path self.model_name = model_name # ------- Functions for data-set processing --------------- # def create_dataset(self, df_train, df_val): # Create tf.data.Dataset from the tuple of image path and labels ds_train = tf.data.Dataset.from_tensor_slices((list(df_train[0]), list(df_train[1]))) # Configure with W&B settings ds_train = (ds_train .shuffle(buffer_size=self.buffer_size) .map(self.load_img, num_parallel_calls=AUTOTUNE) .batch(self.batch_size) .cache() .prefetch(AUTOTUNE)) if df_val is not None: ds_val = tf.data.Dataset.from_tensor_slices((list(df_val[0]), list(df_val[1]))) # Configure with W&B settings ds_val = (ds_val .shuffle(buffer_size=self.buffer_size) .map(self.load_img, num_parallel_calls=AUTOTUNE) .batch(self.batch_size) .cache() .prefetch(AUTOTUNE)) return ds_train, ds_val def preprocessing_data(self, meta, standard): dataset = [] label = [] for idx, row in meta.iterrows(): if pd.notna(row[standard]): dataset.append(os.path.join(img_dir, row['fname'])) # Mask or distancing label.append(row[standard]) df_train, df_val, label_train, label_val = train_test_split(dataset, label, test_size=0.2, random_state=SEED, shuffle=True) return df_train, df_val, label_train, label_val def load_img(self, path, label): img = tf.io.read_file(path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, self.img_size) onehot_label = tf.argmax(label == [0.0, 1.0]) return img, onehot_label # ------- Functions for modeling --------------- # def get_hub_url_and_isize(self, model_name): model_handle_map = { "efficientnetv2-s": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2", "efficientnetv2-m": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_m/feature_vector/2", "efficientnetv2-l": 
"https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_l/feature_vector/2", "efficientnetv2-s-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_s/feature_vector/2", "efficientnetv2-m-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_m/feature_vector/2", "efficientnetv2-l-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_l/feature_vector/2", "efficientnetv2-xl-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_xl/feature_vector/2", "efficientnetv2-b0-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b0/feature_vector/2", "efficientnetv2-b1-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b1/feature_vector/2", "efficientnetv2-b2-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b2/feature_vector/2", "efficientnetv2-b3-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b3/feature_vector/2", "efficientnetv2-s-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_s/feature_vector/2", "efficientnetv2-m-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector/2", "efficientnetv2-l-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_l/feature_vector/2", "efficientnetv2-xl-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector/2", "efficientnetv2-b0-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b0/feature_vector/2", "efficientnetv2-b1-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b1/feature_vector/2", "efficientnetv2-b2-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector/2", "efficientnetv2-b3-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector/2", "efficientnetv2-b0": 
"https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2", "efficientnetv2-b1": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b1/feature_vector/2", "efficientnetv2-b2": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b2/feature_vector/2", "efficientnetv2-b3": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b3/feature_vector/2", "efficientnet_b0": "https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1", "efficientnet_b1": "https://tfhub.dev/tensorflow/efficientnet/b1/feature-vector/1", "efficientnet_b2": "https://tfhub.dev/tensorflow/efficientnet/b2/feature-vector/1", "efficientnet_b3": "https://tfhub.dev/tensorflow/efficientnet/b3/feature-vector/1", "efficientnet_b4": "https://tfhub.dev/tensorflow/efficientnet/b4/feature-vector/1", "efficientnet_b5": "https://tfhub.dev/tensorflow/efficientnet/b5/feature-vector/1", "efficientnet_b6": "https://tfhub.dev/tensorflow/efficientnet/b6/feature-vector/1", "efficientnet_b7": "https://tfhub.dev/tensorflow/efficientnet/b7/feature-vector/1", "bit_s-r50x1": "https://tfhub.dev/google/bit/s-r50x1/1", "inception_v3": "https://tfhub.dev/google/imagenet/inception_v3/feature-vector/4", "inception_resnet_v2": "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature-vector/4", "resnet_v1_50": "https://tfhub.dev/google/imagenet/resnet_v1_50/feature-vector/4", "resnet_v1_101": "https://tfhub.dev/google/imagenet/resnet_v1_101/feature-vector/4", "resnet_v1_152": "https://tfhub.dev/google/imagenet/resnet_v1_152/feature-vector/4", "resnet_v2_50": "https://tfhub.dev/google/imagenet/resnet_v2_50/feature-vector/4", "resnet_v2_101": "https://tfhub.dev/google/imagenet/resnet_v2_101/feature-vector/4", "resnet_v2_152": "https://tfhub.dev/google/imagenet/resnet_v2_152/feature-vector/4", "nasnet_large": "https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/4", "nasnet_mobile": "https://tfhub.dev/google/imagenet/nasnet_mobile/feature_vector/4", "pnasnet_large": 
"https://tfhub.dev/google/imagenet/pnasnet_large/feature_vector/4", "mobilenet_v2_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4", "mobilenet_v2_130_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/4", "mobilenet_v2_140_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4", "mobilenet_v3_small_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/feature_vector/5", "mobilenet_v3_small_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/feature_vector/5", "mobilenet_v3_large_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5", "mobilenet_v3_large_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/feature_vector/5", } model_image_size_map = { "efficientnetv2-s": 384, "efficientnetv2-m": 480, "efficientnetv2-l": 480, "efficientnetv2-b0": 224, "efficientnetv2-b1": 240, "efficientnetv2-b2": 260, "efficientnetv2-b3": 300, "efficientnetv2-s-21k": 384, "efficientnetv2-m-21k": 480, "efficientnetv2-l-21k": 480, "efficientnetv2-xl-21k": 512, "efficientnetv2-b0-21k": 224, "efficientnetv2-b1-21k": 240, "efficientnetv2-b2-21k": 260, "efficientnetv2-b3-21k": 300, "efficientnetv2-s-21k-ft1k": 384, "efficientnetv2-m-21k-ft1k": 480, "efficientnetv2-l-21k-ft1k": 480, "efficientnetv2-xl-21k-ft1k": 512, "efficientnetv2-b0-21k-ft1k": 224, "efficientnetv2-b1-21k-ft1k": 240, "efficientnetv2-b2-21k-ft1k": 260, "efficientnetv2-b3-21k-ft1k": 300, "efficientnet_b0": 224, "efficientnet_b1": 240, "efficientnet_b2": 260, "efficientnet_b3": 300, "efficientnet_b4": 380, "efficientnet_b5": 456, "efficientnet_b6": 528, "efficientnet_b7": 600, "inception_v3": 299, "inception_resnet_v2": 299, "nasnet_large": 331, "pnasnet_large": 331, } model_type = model_handle_map.get(model_name) pixels = model_image_size_map.get(model_name) print(f"Selected model: {model_name} : {model_type}") IMAGE_SIZE = (pixels, pixels) 
print(f"Input size {IMAGE_SIZE}") return model_type, IMAGE_SIZE, pixels def modeling(self, model_url, img_size, pixels, loss_function, optimizer, verbose, trainable): IMG_HEIGHT = IMG_WIDTH = pixels # Data augmentation layer for image data_augmentation = Sequential([ InputLayer(input_shape=[IMG_HEIGHT, IMG_WIDTH, self.n_channels]), RandomFlip("horizontal_and_vertical", seed=self.seed), RandomZoom(0.1, seed=self.seed), #RandomWidth(0.1, seed=self.seed), ]) EfficientNetV2 = hub.KerasLayer(model_url, trainable=trainable) # Trainable: Fine tuning self.model = Sequential([ data_augmentation, EfficientNetV2, Dropout(rate=0.2, seed=self.seed), Dense(units=2, # Binary classifcation activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(0.0001)) ]) self.model.build((None,) + img_size + (self.n_channels,)) # (IMG_SIZE, IMG_SIZE, 3) self.model.summary() self.model.compile(optimizer=optimizer, loss=loss_function, metrics=['accuracy', self.f1]) def train(self, meta, num_folds, loss_function, optimizer, verbose, trainable, standard, model_path): self.model_url, self.img_size, self.pixels = self.get_hub_url_and_isize(self.model_name) df_train, df_test, label_train, label_test = self.preprocessing_data(meta, standard) df_train = tuple(zip(df_train, label_train)) df_test = tuple(zip(df_test, label_test)) df_train = tuple(zip(*df_train)) df_test = tuple(zip(*df_test)) ds_train, ds_test = self.create_dataset(df_train, df_test) ds_train = ds_train.shuffle(10) ds_val = ds_train.take(int(0.25*(len(ds_train)))) # Train/validation set split (80/20) ds_train = ds_train.skip(int(0.25*(len(ds_train)))) self.modeling(self.model_url, self.img_size, self.pixels, loss_function, optimizer, verbose, trainable) history = self.model.fit(ds_train, validation_data=ds_val, epochs=self.epochs, callbacks=[WandbCallback()], verbose=verbose) print('------------------------------Evaluation--------------------------------') scores = self.model.evaluate(ds_test, verbose=verbose) # Save model and 
weights self.model.save(model_path + f'/{standard}.h5') self.model.save_weights(model_path + f'/{standard}_weight.h5') @tf.autograph.experimental.do_not_convert def f1(self, y_true, y_pred): return f1_score(y_true, tf.math.argmax(y_pred, 1)) ``` Train the model ``` fivek = FiveKCompliance(model_name=model_name, seed=SEED, config=wandb.config, data_path=data_path) fivek.train(meta=meta, num_folds=num_folds, loss_function=loss_function, optimizer=optimizer, verbose=verbose, trainable=trainable, standard=standard, model_path=model_path + '/model_saved') ``` Load mask model and pseudo labeling distance value ``` # Redefine the f1 function for keras model dependencies @tf.autograph.experimental.do_not_convert def f1(self, y_true, y_pred): return f1_score(y_true, tf.math.argmax(y_pred, 1)) model_url, img_size, _ = fivek.get_hub_url_and_isize(model_name) dependencies = { 'f1': f1, 'KerasLayer': hub.KerasLayer(model_url, trainable=trainable) } model_mask = keras.models.load_model(f'../input/model-saved/{standard}.h5', custom_objects=dependencies) model_mask.summary() pseudo_lbl_img_name = meta['fname'] pseudo_lbl_img_path = img_dir def predict_custom(model, meta, img_path, img_size, case): no_img_filled = 0 for i, row in tqdm(enumerate(meta.itertuples(), 0)): if pd.isna(getattr(row, case)): image = tf.io.read_file(img_path + f'/{row.fname}') image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, img_size) prediction_scores = model.predict(np.expand_dims(image, axis=0)) meta.at[i, case] = np.argmax(prediction_scores) no_img_filled += 1 print(f"Predicted {no_img_filled} images.") return meta case = 'mask' new_meta_mask = predict_custom(model_mask, meta, img_dir, img_size, case) new_meta_mask_copy = new_meta_mask.copy() ``` ``` Part 1 if (mask == 1) and (5k == 1): distancing = 1 elif (mask == 1) and (5k == 0): distancing = 0 ``` ``` def pseudo_labeling(mask, distancing, fivek, label): new_distancing = distancing new_fivek = fivek if label == 
'distancing': if fivek == 0 and mask == 1 and math.isnan(distancing): new_distancing = 1.0 elif fivek == 1 and mask == 1 and math.isnan(distancing): new_distancing = 0.0 return new_distancing elif label == '5k': if distancing == 1 and mask == 1 and math.isnan(fivek): new_fivek = 1.0 elif (distancing == 0 or mask == 0) and math.isnan(fivek): new_fivek = 0.0 return new_fivek new_meta_mask_copy['distancing'] = [pseudo_labeling(x, y, z, 'distancing') for x, y, z in zip(new_meta_mask_copy['mask'], new_meta_mask_copy['distancing'], new_meta_mask_copy['5k'])] new_meta_mask_copy.to_csv('train_meta_mask.csv') ``` Load distancing model and pseudo labeling 5k value ``` standard = 'distancing' model_distancing = keras.models.load_model(f'../input/modelsaved/{standard}.h5', custom_objects=dependencies) model_distancing.summary() case = 'distancing' new_meta_mask_distance = predict_custom(model_distancing, new_meta_mask_copy, img_dir, img_size, case) new_meta_mask_distance_copy = new_meta_mask_distance.copy() new_meta_mask_distance_copy['5k'] = [pseudo_labeling(x, y, z, '5k') for x, y, z in zip(new_meta_mask_distance_copy['mask'], new_meta_mask_distance_copy['distancing'], new_meta_mask_distance_copy['5k'])] ``` Check if there is any missing values ``` new_meta_mask_distance_copy.info() new_meta_mask_distance_copy.to_csv('train_meta_mask_distance.csv') ``` ### Final step of pipeline: Predict based on 5k value ``` standard = '5k' fivek = FiveKCompliance(model_name=model_name, seed=SEED, config=wandb.config, data_path=data_path) fivek.train(meta=new_meta_mask_distance_copy, num_folds=num_folds, loss_function=loss_function, optimizer=optimizer, verbose=verbose, trainable=trainable, standard=standard, model_path=model_path + '/model_saved') ``` ### Results from W&B (both approaches) ``` import pandas as pd import wandb api = wandb.Api() # Project is specified by <entity/project-name> runs_1 = api.runs("atien228/diveintocode-grad-1st-approach") runs_2 = 
api.runs("atien228/diveintocode-grad-2nd-approach") run_id_1, summary_list_1, config_list_1, name_list_1 = [], [], [], [] run_id_2, summary_list_2, config_list_2, name_list_2 = [], [], [], [] for run in runs_1: # .id contains the generated hash id for each run in the project run_id_1.append(run.id) # .summary contains the output keys/values for metrics like accuracy. # We call ._json_dict to omit large files summary_list_1.append(run.summary._json_dict) # .config contains the hyperparameters. # We remove special values that start with _. config_list_1.append( {k: v for k,v in run.config.items() if not k.startswith('_')}) # .name is the human-readable name of the run. name_list_1.append(run.name) runs_df_1 = pd.DataFrame({ "id": run_id_1, "summary": summary_list_1, "config": config_list_1, "name": name_list_1 }) for run in runs_2: # .id contains the generated hash id for each run in the project run_id_2.append(run.id) # .summary contains the output keys/values for metrics like accuracy. # We call ._json_dict to omit large files summary_list_2.append(run.summary._json_dict) # .config contains the hyperparameters. # We remove special values that start with _. config_list_2.append( {k: v for k,v in run.config.items() if not k.startswith('_')}) # .name is the human-readable name of the run. 
name_list_2.append(run.name) runs_df_2 = pd.DataFrame({ "id": run_id_2, "summary": summary_list_2, "config": config_list_2, "name": name_list_2 }) os.makedirs("wandb", exist_ok=True) runs_df_1.to_csv(os.path.join("wandb/", "first_approach.csv")) runs_df_2.to_csv(os.path.join("wandb/", "second_approach.csv")) ``` #### Print out the dataframes ``` print('First approach') runs_df_1 print('Second approach') runs_df_2 ``` #### Compare the performance of mask detection tasks ``` run1 = api.run(f"atien228/diveintocode-grad-1st-approach/{str(runs_df_1[runs_df_1['name']=='mask-task'].iloc[0]['id'])}") run2 = api.run(f"atien228/diveintocode-grad-2nd-approach/{str(runs_df_2[runs_df_2['name']=='mask-task-nofold'].iloc[0]['id'])}") run_ref = api.run(f"atien228/diveintocode-grad-1st-approach/{str(runs_df_1[runs_df_1['name']=='mask-task-2folds'].iloc[0]['id'])}") epoch, acc_1, acc_2, acc_ref, val_acc_1, val_acc_2, val_acc_ref, loss_1, loss_2, loss_ref, val_loss_1, val_loss_2, val_loss_ref, f1_1, f1_2, f1_ref, val_f1_1, val_f1_2 = \ [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [] for (i, row1), (j, row2), (j, row3) in zip(run1.history().iterrows(), run2.history().iterrows(), run_ref.history().iterrows()): epoch.append(row1["epoch"]) acc_1.append(row1['accuracy']) acc_2.append(row2['accuracy']) acc_ref.append(row3['accuracy']) val_acc_1.append(row1['val_accuracy']) val_acc_2.append(row2['val_accuracy']) val_acc_ref.append(row3['val_accuracy']) loss_1.append(row1['loss']) loss_2.append(row2['loss']) loss_ref.append(row3['loss']) val_loss_1.append(row1['val_loss']) val_loss_2.append(row2['val_loss']) val_loss_ref.append(row3['val_loss']) f1_1.append(row1['f1']) f1_2.append(row2['f1']) f1_ref.append(row3['f1']) val_f1_1.append(row1['val_f1']) val_f1_2.append(row2['val_f1']) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-accuracy-comp") wandb.log({"mask-only-accuracy_comparision" : wandb.plot.line_series( xs=epoch, ys=[acc_1, 
acc_2, acc_ref], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Accuracy of mask detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-val-comp") wandb.log({"mask-only-val_accuracy_comparision" : wandb.plot.line_series( xs=epoch, ys=[val_acc_1, val_acc_2, val_acc_ref], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Validation accuracy of mask detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-loss-comp") wandb.log({"loss_comparision" : wandb.plot.line_series( xs=epoch, ys=[loss_1, loss_2, loss_ref], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Loss of mask detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-val-loss-comp") wandb.log({"val_loss_comparision" : wandb.plot.line_series( xs=epoch, ys=[val_loss_1, val_loss_2, val_loss_ref], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Validation loss of mask detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-f1-comp") wandb.log({"f1_comparision" : wandb.plot.line_series( xs=epoch, ys=[f1_1, f1_2], keys=["1st approach", "2nd approach"], title="F1 score mask detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="mask-val-f1-comp") wandb.log({"val_f1_comparision" : wandb.plot.line_series( xs=epoch, ys=[val_f1_1, val_f1_2], keys=["1st approach", "2nd approach"], title="Validation F1 score mask detection tasks", xname="epoch")}) ``` #### Compare the performance of distance detection tasks ``` run1 = api.run(f"atien228/diveintocode-grad-1st-approach/{str(runs_df_1[runs_df_1['name']=='distance-task'].iloc[0]['id'])}") run2 = 
api.run(f"atien228/diveintocode-grad-2nd-approach/{str(runs_df_2[runs_df_2['name']=='distance-task-nofold'].iloc[0]['id'])}") epoch, acc_1, acc_2, val_acc_1, val_acc_2, loss_1, loss_2, loss_ref, val_loss_1, val_loss_2, f1_1, f1_2, f1_ref, val_f1_1, val_f1_2 = \ [], [], [], [], [], [], [], [], [], [], [], [], [], [], [] for (i, row1), (j, row2)in zip(run1.history().iterrows(), run2.history().iterrows()): epoch.append(row1["epoch"]) acc_1.append(row1['accuracy']) acc_2.append(row2['accuracy']) val_acc_1.append(row1['val_accuracy']) val_acc_2.append(row2['val_accuracy']) loss_1.append(row1['loss']) loss_2.append(row2['loss']) val_loss_1.append(row1['val_loss']) val_loss_2.append(row2['val_loss']) f1_1.append(row1['f1']) f1_2.append(row2['f1']) val_f1_1.append(row1['val_f1']) val_f1_2.append(row2['val_f1']) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-accuracy-comp") wandb.log({"distance-only-accuracy_comparision" : wandb.plot.line_series( xs=epoch, ys=[acc_1, acc_2], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Accuracy of distance detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-val-comp") wandb.log({"distance-only-val_accuracy_comparision" : wandb.plot.line_series( xs=epoch, ys=[val_acc_1, val_acc_2], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Validation accuracy of distance detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-loss-comp") wandb.log({"loss_comparision" : wandb.plot.line_series( xs=epoch, ys=[loss_1, loss_2], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Loss of distance detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-val-loss-comp") wandb.log({"val_loss_comparision" : 
wandb.plot.line_series( xs=epoch, ys=[val_loss_1, val_loss_2], keys=["1st approach", "2nd approach", "1st approach (mask) with 2 folds"], title="Validation loss of distance detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-f1-comp") wandb.log({"f1_comparision" : wandb.plot.line_series( xs=epoch, ys=[f1_1, f1_2], keys=["1st approach", "2nd approach"], title="F1 score distance detection tasks", xname="epoch")}) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="distance-val-f1-comp") wandb.log({"val_f1_comparision" : wandb.plot.line_series( xs=epoch, ys=[val_f1_1, val_f1_2], keys=["1st approach", "2nd approach"], title="Validation F1 score distance detection tasks", xname="epoch")}) ``` #### 5K model performance ``` run5k = api.run(f"atien228/diveintocode-grad-2nd-approach/{str(runs_df_2[runs_df_2['name']=='fivek-task-nofold'].iloc[0]['id'])}") epoch, acc_1, val_acc_1, loss_1, val_loss_1, f1_1, val_f1_1 = \ [], [], [], [], [], [], [] for i, row1 in run5k.history().iterrows(): epoch.append(row1["epoch"]) acc_1.append(row1['accuracy']) val_acc_1.append(row1['val_accuracy']) loss_1.append(row1['loss']) val_loss_1.append(row1['val_loss']) f1_1.append(row1['f1']) val_f1_1.append(row1['val_f1']) wandb.init(project="diveintocode-grad-results-graph", entity="atien228", name="5k-comp") wandb.log({"5k_comparision" : wandb.plot.line_series( xs=epoch, ys=[acc_1, val_acc_1, loss_1, val_loss_1, f1_1, val_f1_1], keys=["accuracy", "val_accuracy", "loss", "val_loss", "f1", "val_f1"], title="Metrics of 5k detection task", xname="epoch")}) ```
github_jupyter
<a href="https://colab.research.google.com/github/akash-kaul/ecosys/blob/master/GraphVisualize.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Set-Up ``` !pip install pyTigerGraph !pip install pandas !pip install flat-table import pyTigerGraph as tg import plotly.express as px import pandas as pd import flat_table graph = tg.TigerGraphConnection( host="https://61af4f31021c449e85f690cbec28ef7a.i.tgcloud.io", graphname="MyGraph", apiToken="r72kccg1jaso02s8gn20fskgfnh7brim") ``` # Print all Endpoints ``` results = graph.getEndpoints() print(results) ``` # Run Query ``` preInstalledResult = graph.runInstalledQuery("AuthorMostPubs", {}) parsR = (preInstalledResult) df = pd.DataFrame(parsR[0]) df1 = flat_table.normalize(df) df2 = df1.rename(columns={'Author.attributes.@pubNum':'Number of Publications', 'Author.attributes.author_name': 'Name'}) df3 = df2[['Name', 'Number of Publications']] df3 ``` # Display Results ``` fig = px.bar(df3, x="Name", y="Number of Publications", color='Number of Publications', labels={'Name': 'Author'}, height=400) # fig.show() ``` # Dash Setup ``` !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip !unzip ngrok-stable-linux-amd64.zip get_ipython().system_raw('./ngrok http 8050 &') ! 
curl -s http://localhost:4040/api/tunnels | python3 -c \ "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])" !pip install dash==1.12.0 !pip install dash-html-components==1.0.3 !pip install dash-core-components==1.10.0 !pip install dash-table==4.7.0 ``` # Import Dash ``` %%writefile dash_app.py import dash import dash_core_components as dcc import dash_html_components as html external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) colors = { 'background': '#111111', 'text': '#7FDBFF' } app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[ html.H1( children='Hello Dash', style={ 'textAlign': 'center', 'color': colors['text'] } ), html.Div(children='Dash: A web application framework for Python.', style={ 'textAlign': 'center', 'color': colors['text'] }), dcc.Graph( id='example-graph-2', figure={ 'data': [ {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'}, {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'}, ], 'layout': { 'plot_bgcolor': colors['background'], 'paper_bgcolor': colors['background'], 'font': { 'color': colors['text'] } } } ) ]) if __name__ == '__main__': app.run_server(debug=True) !python dash_app.py ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/hdr_landsat.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/hdr_landsat.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/hdr_landsat.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/hdr_landsat.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset import datetime Map.setCenter(-95.738, 18.453, 9) # Filter the LE7 collection to a single date. 
collection = (ee.ImageCollection('LE7_L1T') .filterDate(datetime.datetime(2002, 11, 8), datetime.datetime(2002, 11, 9))) image = collection.mosaic().select('B3', 'B2', 'B1') # Display the image normally. Map.addLayer(image, {'gain': '1.6, 1.4, 1.1'}, 'Land') # Add and stretch the water. Once where the elevation is masked, # and again where the elevation is zero. elev = ee.Image('srtm90_v4') mask1 = elev.mask().eq(0).And(image.mask()) mask2 = elev.eq(0).And(image.mask()) Map.addLayer(image.mask(mask1), {'gain': 6.0, 'bias': -200}, 'Water: Masked') Map.addLayer(image.mask(mask2), {'gain': 6.0, 'bias': -200}, 'Water: Elev 0') ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
``` import IPython.display as ipd ``` [&larr; Back to Index](index.html) # Symbolic Representations **Symbolic music representations** comprise any kind of score representation with an explicit encoding of notes or other musical events. These include machine-readable data formats such as MIDI. Any kind of digital data format may be regarded as symbolic since it is based on a finite alphabet of letters or symbols. ## Piano-Roll Representations Around the late 19th and early 20th centuries, self-playing pianos called **player pianos** became popular. The input for these pianos is a continuous roll of paper with holes punched into it. This paper roll is called a **piano roll**. Performances by famous musicians such as Gustav Mahler, Edvard Grieg, Scott Joplin and George Gershwin have been recorded onto piano rolls. The pianola is similar to the player piano, but rather than being a self-playing piano itself, it is a separate device that sits in front of a piano and plays its keys. Here is a pianola in action: ``` ipd.display( ipd.YouTubeVideo("2A6ZXZwl3nA", start=106) ) ``` Today, a **piano-roll representation** generically refers to any visualization of note information resembling a piano roll. See below for examples of piano-roll representations by Stephen Malinowski. Here, the horizontal axis represents time, and the vertical axis represents pitch. ``` ipd.display( ipd.YouTubeVideo("LlvUepMa31o", start=15) ) ipd.display( ipd.YouTubeVideo("Kri2jWr08S4", start=11) ) ``` ## MIDI Representations Another symbolic representation is based on the **MIDI** standard ([Wikipedia](https://en.wikipedia.org/wiki/MIDI)), or Musical Instrument Digital Interface. The advent of MIDI in 1981-83 caused a rapid growth in the electronic musical instrument market. MIDI messages encode information for each note event such as the note onset, note offset, and intensity (represented as "velocity" in MIDI terminology). On computers, MIDI files contain a list of MIDI messages and other metadata.
The **MIDI note number** is an integer between 0 and 127 that encodes the note's pitch. Most importantly, C4 (middle C) has MIDI note number 60, and A4 (concert A440) has MIDI note number 69. MIDI note numbers separated by 12 are separated by one octave, e.g. 72 = C5, 84 = C6, etc. The **key velocity** is an integer between 0 and 127 which controls the intensity of the sound. The **MIDI channel** is an integer between 0 and 15 which prompts the synthesizer to use a specific instrument. MIDI subdivides a quarter note into **clock pulses** or **ticks**. For example, if the number of pulses per quarter note (PPQN) is defined to be 120, then 60 ticks would represent the length of an eighth note. MIDI can also encode tempo in terms of **beats per minute** (BPM) thus allowing for absolute timing information. ## Score Representations **Score representations** encode explicit information about musical symbols such as clefs, time signatures, key signatures, notes, rests, dynamics, etc. However, score representations, the way we define it here, does not include any description of the final visual layout and positioning of these symbols on the page. For better or for worse, **MusicXML** has emerged as a universal format for storing music files for use among different music notation applications. Here is an excerpt of a MusicXML file: <measure number="2"> <note> <pitch> <step>B</step> <alter>-1</alter> <octave>4</octave> </pitch> <duration>1</duration> <voice>1</voice> <type>16th</type> <stem>down</stem> <beam number="1">begin</beam> <beam number="2">begin</beam> </note> <note> <pitch> <step>D</step> <octave>5</octave> </pitch> <duration>1</duration> <voice>1</voice> <type>16th</type> <stem>down</stem> <beam number="1">continue</beam> <beam number="2">continue</beam> </note> ... </measure> [&larr; Back to Index](index.html)
github_jupyter
This material is copied (possibily with some modifications) from the [Python for Text-Analysis course](https://github.com/cltl/python-for-text-analysis/tree/master/Chapters). # Chapter 6 - Core concepts of containers In the next chapters, we will introduce the most important containers in the Python language, which are **lists**, **sets**, **tuples**, and **dictionaries**. However, before we can introduce them, it's important that we present some things that they all share, which is hence the goal of this chapter. **At the end of this chapter, you will be able to understand the following concepts:** * positional arguments * keyword arguments * mutability **If you want to learn more about these topics, you might find the following links useful:** * [the Python glossary](https://docs.python.org/3/glossary.html): please look for the terms *immutable* and *argument* If you have **questions** about this chapter, contact Cody in the Slack group. ## 1. Containers When working with data, we use different python objects (which we call **containers**) to order data in a way that is convenient for the task we are trying to solve. Each of the following container types has different advantages for storing and accessing data (which you will learn about in the following chapters): * lists * tuples * sets * dictionaries Each of the container types can be manipulated using different methods and functions, for instance, allowing us to add, access, or remove data. It is important that you understand those. ``` # Some examples (you do not have to remember this now): a_list = [1,2,3, "let's", "use", "containers"] a_tuple = (1, 2, 3, "let's", "use", "containers") a_set = {1, 2, 3, "let's", "use", "containers"} a_dict = {1:"let's", 2:"use", 3: "containers"} #print(a_list) #print(a_tuple) #print(a_set) #print(a_dict) ``` ## 2. Positional arguments (args) and keyword arguments (kwargs) Args and kwargs are basically used to specify what a function or method is supposed to do. 
Therefore, a good understanding of the terms **args** and **kwargs** is important for the use of functions and methods in Python. Let's look at some string method examples from the last topic: ``` a_string = 'hello world' print('example 1. upper method:', a_string.upper()) print('example 2. count method:', a_string.count('l')) print('example 3. replace method:', a_string.replace('l', 'b')) print('example 4. split method:', a_string.split()) print('example 5. split method:', a_string.split(sep='o')) ``` `'l'` in example 2 is a positional argument. `sep='o'` in example 5 is an example of a keyword argument. Let's analyze the examples. | example | method | positional arguments (args) | keyword arguments (kwargs) | |---------|---------| -----------------|----------------------------| | `1` | upper | 0 | 0 | | `2` | count | 1 | 0 | | `3` | replace | 2 | 0 | | `4` | split | 0 | 0 | | `5` | split | 0 | 1 | This might look a bit confusing because sometimes methods have positional arguments and/or keyword arguments, and sometimes they do not. Luckily Python has a built-in function **help**, which provides us insight into how to use each method. ``` help(str.upper) ``` we learn that **str.upper** takes no positional arguments and no keyword arguments (nothing between the parentheses) and returns a string. ``` help(str.count) ``` we learn that **str.count** takes one positional argument (*sub*) and returns an integer (-> int). You can ignore the information between square brackets for now. ``` help(str.replace) ``` we learn that **str.replace** takes two positional arguments (*old* and *new*) and one keyword argument (*count*). It returns a string. ``` help(str.split) ``` Now it becomes interesting. **str.split** has no positional arguments and two keyword arguments (*sep* and *maxsplit*). The method returns a list of strings. ## 3. 
Difference between positional arguments (args) and keyword arguments (kwargs) * Positional arguments (args) are **compulsory** in order to call a method. * Keyword arguments (kwargs) are **optional**. They can be optional since they usually have a **default** value. By using the keyword argument, you simply change the default value to another value. For example, if we call a method that needs a positional argument without any, we get an error: ``` a_string = 'hello world' a_string.count() ``` However, if we do not provide a value for keyword arguments, we do not get an error: ``` a_string = 'hello world' a_string.split() ``` ## 4. Mutability Hopefully, it will become clear in the following chapters what we mean by **mutability**. For now, you can think of it in terms of 'can I change the data?'. Please remember the following categories for the subsequent chapters: | **immutable** | **mutable** | |-----------------|-------------| | integer | list | | string | set | | - | dictionary | # Exercises Please find some exercises about core concepts of python containers below. ### Exercise 1: Use the help function to figure out what the string methods below are doing. Then analyze how many positional and keyword arguments are used in the following examples: ``` print(a_string.lower()) print(a_string.strip()) print(a_string.strip('an')) print(a_string.partition('and')) ``` ### Exercise 2: Please illustrate the difference between ARGS and KWARGS using the example of string methds. Feel free to use dir(str) and the help function for inspiration. ``` # your examples here ```
github_jupyter
# Exemplo 05: Regras de Associação ## Regras de associação em compras em supermercado Regras de associação são usadas para descobrir elementos que ocorrem em comum dentro de um determinado conjunto de dados e suas possiveis associações. As regras de Associação têm como premissa básica encontrar elementos que implicam na ocorrencia de outros elementos em uma mesma transação, ou seja, encontrar relacionamentos ou padrões frequentes entre conjuntos de dados. O termo transação indica quais itens foram consultados em uma determinada operação de consulta. Um exemplo clássico é estabelecer associação de compra de produtos por um consumidor, isto é, se o cliente compra um determinado produto, quais outros produtos ele tende a comprar também. Essa técnica é largamente utilizada em supermercados e lojas de varejo. No Spark é implementado o algoritmo FP-Growth que é a implementação paralela do algoritmo *a priori*. ### FP-Growth FP-Growth is a type of "a priori" algorithm to mine frequent itemsets. The Spark implementation use the parallel FP-growth algorithm described in *Li et al.*, **PFP: Parallel FP-Growth for Query Recommendation** [LI2008](http://dx.doi.org/10.1145/1454008.1454027). PFP distributes computation in such a way that each worker executes an independent group of mining tasks. 
``` # Find Spark executable import findspark findspark.init() # Load libraries import pyspark from pyspark.sql import SparkSession from pyspark.sql.functions import split from pyspark.ml.fpm import FPGrowth import time start_time = time.time() ``` ## Parameters configuration ``` # Path to dataset file data_path='./data/' ``` ## Creating Spark environment ``` # Create Spark Session spark = SparkSession.builder \ .master("local[*]") \ .appName("AssociationRule") \ .getOrCreate() ``` ## Reading Data ``` # Read the list of products by customer (csv) and transform to a list of vectors data = (spark.read .text(data_path+"groceries.csv.gz") .select(split("value", ",").alias("items"))) data.show(truncate=False) ``` ## Associative Rule: Frequent Pattern Mining Mining frequent items, itemsets, subsequences, or other substructures is usually among the first steps to analyze a large-scale dataset ### Set FPGrowth algorithm: **itemsCol** = Name of items collumn. Not needed if it is only one. **minConfidence** = Minimal confidence for generating Association Rule. [0.0, 1.0]. minConfidence will not affect the mining for frequent itemsets, but will affect the association rules generation. **minSupport** = Support says how popular an itemset is, as measured by the proportion of transactions in which an itemset appears. Minimal support level of the frequent pattern. [0.0, 1.0]. Any pattern that appears more than (minSupport * size-of-the-dataset) times will be output in the frequent itemsets. **numPartitions** = Number of partitions (at least 1) used by parallel FP-growth. By default the param is not set, and partition number of the input dataset is used. ``` fpGrowth = FPGrowth(itemsCol="items", minSupport=0.05, minConfidence=0.1) fi = fpGrowth.fit(data) # Display frequent itemsets. fi.freqItemsets.sort('freq', ascending=False).show(truncate=False) ``` ### Display generated association rules **Antecedent:** Antecedent itens. **Consequent:** Consequent itens. 
**Confidence:** This says how likely item Y is purchased when item X is purchased, expressed as {X -> Y}. This is measured by the proportion of transactions with item X, in which item Y also appears. **Lift:** This says how likely item Y is purchased when item X is purchased, while controlling for how popular item Y is. Lift avoid the item popularity which affects confidence. A lift value greater than 1 means that item Y is likely to be bought if item X is bought, while a value less than 1 means that item Y is unlikely to be bought if item X is bought. ``` fi.associationRules.sort('confidence', ascending=False).show(truncate=False) ``` ### Verify the rules against dataset Transform examines the input items against all the association rules and summarize the consequents as prediction. ``` fi.transform(data).show(truncate=False) spark.stop() print("--- Execution time: %s seconds ---" % (time.time() - start_time)) ```
github_jupyter
# Insights into Forest Fires ## Data Visualisation ## Loading libraries ``` import matplotlib.pyplot as plt import math import numpy as np import pandas as pd import random # # importing sklearn libraries # from sklearn import neural_network, linear_model, preprocessing, svm, tree # from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier # from sklearn.metrics import accuracy_score, mean_squared_error, r2_score # from sklearn.model_selection import train_test_split # from sklearn.model_selection import cross_val_score # from sklearn.model_selection import KFold # from sklearn.naive_bayes import GaussianNB # importing keras libraries #from keras.models import Sequential #from keras.layers import Dense #from keras.wrappers.scikit_learn import KerasRegressor import warnings # supressing the warning on the usage of Linear Regression model warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd") ``` ## Working with the dataset ### Loading the dataset The dataset can be downloaded from: https://www.kaggle.com/elikplim/forest-fires-data-set The attributes in the dataset include: 1. X - x-axis spatial coordinate within the Montesinho park map: 1 to 9 2. Y - y-axis spatial coordinate within the Montesinho park map: 2 to 9 3. month - month of the year: "jan" to "dec" 4. day - day of the week: "mon" to "sun" 5. FFMC - FFMC index from the FWI system: 18.7 to 96.20 6. DMC - DMC index from the FWI system: 1.1 to 291.3 7. DC - DC index from the FWI system: 7.9 to 860.6 8. ISI - ISI index from the FWI system: 0.0 to 56.10 9. temp - temperature in Celsius degrees: 2.2 to 33.30 10. RH - relative humidity in %: 15.0 to 100 11. wind - wind speed in km/h: 0.40 to 9.40 12. rain - outside rain in mm/m2 : 0.0 to 6.4 13. 
area - the burned area of the forest (in ha): 0.00 to 1090.84 ``` forest_fires = pd.read_csv('/home/ryan/Learning-Forest-Fires/forest_fires.csv') forest_fires.head(4) ``` ### Converting the labels under month and day to integers ``` forest_fires.month.replace(('jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'),(1,2,3,4,5,6,7,8,9,10,11,12), inplace=True) forest_fires.day.replace(('mon','tue','wed','thu','fri','sat','sun'),(1,2,3,4,5,6,7), inplace=True) ``` ### Statistical analysis of dataset ``` forest_fires.describe() ``` ### Corelation analysis for the dataset ``` forest_fires.corr() ``` ### Extracting features from the dataset ``` x_values = list(forest_fires['X']) y_values = list(forest_fires['Y']) loc_values = [] for index in range(0, len(x_values)): temp_value = [] temp_value.append(x_values[index]) temp_value.append(y_values[index]) loc_values.append(temp_value) month_values = list(forest_fires['month']) day_values = list(forest_fires['day']) ffmc_values = list(forest_fires['FFMC']) dmc_values = list(forest_fires['DMC']) dc_values = list(forest_fires['DC']) isi_values = list(forest_fires['ISI']) temp_values = list(forest_fires['temp']) rh_values = list(forest_fires['RH']) wind_values = list(forest_fires['wind']) rain_values = list(forest_fires['rain']) area_values = list(forest_fires['area']) attribute_list = [] for index in range(0, len(x_values)): temp_list = [] temp_list.append(x_values[index]) temp_list.append(y_values[index]) temp_list.append(month_values[index]) temp_list.append(day_values[index]) temp_list.append(ffmc_values[index]) temp_list.append(dmc_values[index]) temp_list.append(dc_values[index]) temp_list.append(isi_values[index]) temp_list.append(temp_values[index]) temp_list.append(rh_values[index]) temp_list.append(wind_values[index]) temp_list.append(rain_values[index]) attribute_list.append(temp_list) ``` ### Counting the instances of location points in dataset ``` def count_points(x_points, y_points, scaling_factor): 
count_array = [] for index in range(0, len(x_points)): temp_value = [x_points[index], y_points[index]] count = 0 for value in loc_values: if(temp_value == value): count = count + 1 count_array.append(count * scaling_factor ) return count_array ``` ### Histogram plotting function for dataset ``` def histogram_plot(dataset, title): plt.figure(figsize=(8, 6)) ax = plt.subplot() ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.title(title, fontsize = 22) plt.hist(dataset, edgecolor='black', linewidth=1.8) ``` ### Scatter plot for the locations ``` plt.figure(figsize=(8, 6)) ax = plt.subplot() ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.title("Fire location plot", fontsize = 22) plt.scatter(x_values, y_values, s = count_points(x_values, y_values, 25), alpha = 0.3) plt.show() ``` ### Plotting the distribution of values for the dataset ``` histogram_plot(forest_fires['month'], title = "Month distribution") plt.show() histogram_plot(day_values, title = "Day distribution") plt.show() histogram_plot(ffmc_values, title = "FFMC distribution") plt.show() histogram_plot(dmc_values, title = "DMC distribution") plt.show() histogram_plot(dc_values, title = "DC distribution") plt.show() histogram_plot(isi_values, title = "ISI distribution") plt.show() histogram_plot(temp_values, title = "Temperature distribution") plt.show() histogram_plot(rh_values, title = "RH distribution") plt.show() histogram_plot(wind_values, title = "Wind distribution") plt.show() histogram_plot(rain_values, title = "Rain distribution") plt.show() histogram_plot(area_values, title = "Burned area distribution") plt.show() ```
github_jupyter
# Profiling PyTorch Multi GPU Multi Node Training Job with Amazon SageMaker Debugger This notebook will walk you through creating a PyTorch training job with the SageMaker Debugger profiling feature enabled. It will create a multi GPU multi node training. ### Install sagemaker and smdebug To use the new Debugger profiling features, ensure that you have the latest versions of SageMaker and SMDebug SDKs installed. The following cell updates the libraries and restarts the Jupyter kernel to apply the updates. ``` import sys import IPython install_needed = True # should only be True once if install_needed: print("installing deps and restarting kernel") !{sys.executable} -m pip install -U sagemaker !{sys.executable} -m pip install -U smdebug IPython.Application.instance().kernel.do_shutdown(True) ``` ## 1. Create a Training Job with Profiling Enabled<a class="anchor" id="option-1"></a> You will use the standard [SageMaker Estimator API for PyTorch ](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.pytorch.html) to create training jobs. To enable profiling, create a `ProfilerConfig` object and pass it to the `profiler_config` parameter of the `PyTorch` estimator. ### Define hyperparameters Define hyperparameters such as number of epochs, batch size, and data augmentation. You can increase batch size to increases system utilization, but it may result in CPU bottlneck problems. Data preprocessing of a large batch size with augmentation requires a heavy computation. You can disable data_augmentation to see the impact on the system utilization. For demonstration purpose, the following hyperparameters are prepared to increase CPU usage, leading to GPU starvation. 
``` hyperparameters = {"training_script":"pt_res50_cifar10_distributed.py", "nproc_per_node":4, "nnodes":2} ``` ### Configure rules We specify the following rules: - loss_not_decreasing: checks if loss is decreasing and triggers if the loss has not decreased by a certain persentage in the last few iterations - LowGPUUtilization: checks if GPU is under-utilizated - ProfilerReport: runs the entire set of performance rules and create a final output report with further insights and recommendations. ``` from sagemaker.debugger import Rule, ProfilerRule, rule_configs rules=[ Rule.sagemaker(rule_configs.loss_not_decreasing()), ProfilerRule.sagemaker(rule_configs.LowGPUUtilization()), ProfilerRule.sagemaker(rule_configs.ProfilerReport()), ] ``` ### Specify a profiler configuration The following configuration will capture system metrics at 500 milliseconds. The system metrics include utilization per CPU, GPU, memory utilization per CPU, GPU as well I/O and network. Debugger will capture detailed profiling information from step 5 to step 15. This information includes Horovod metrics, dataloading, preprocessing, operators running on CPU and GPU. ``` from sagemaker.debugger import ProfilerConfig, FrameworkProfile profiler_config = ProfilerConfig( system_monitor_interval_millis=500, framework_profile_params=FrameworkProfile(start_step=5, num_steps=10) ) ``` ### Get the image URI The image that we will is dependent on the region that you are running this notebook in. ``` import boto3 session = boto3.session.Session() region = session.region_name image_uri = f"763104351884.dkr.ecr.{region}.amazonaws.com/pytorch-training:1.6.0-gpu-py36-cu110-ubuntu18.04" ``` ### Define estimator To enable profiling, you need to pass the Debugger profiling configuration (`profiler_config`), a list of Debugger rules (`rules`), and the image URI (`image_uri`) to the estimator. Debugger enables monitoring and profiling while the SageMaker estimator requests a training job. 
``` import sagemaker from sagemaker.pytorch import PyTorch estimator = PyTorch( role=sagemaker.get_execution_role(), image_uri=image_uri, instance_count=2, instance_type='ml.p3.8xlarge', source_dir='entry_point', entry_point='distributed_launch.py', hyperparameters=hyperparameters, profiler_config=profiler_config, rules=rules ) ``` ### Start training job The following `estimator.fit()` with `wait=False` argument initiates the training job in the background. You can proceed to run the dashboard or analysis notebooks. ``` estimator.fit(wait=False) ``` ## 2. Analyze Profiling Data Copy outputs of the following cell (`training_job_name` and `region`) to run the analysis notebooks `profiling_generic_dashboard.ipynb`, `analyze_performance_bottlenecks.ipynb`, and `profiling_interactive_analysis.ipynb`. ``` training_job_name = estimator.latest_training_job.name print(f"Training jobname: {training_job_name}") print(f"Region: {region}") ``` While the training is still in progress you can visualize the performance data in SageMaker Studio or in the notebook. Debugger provides utilities to plot system metrics in form of timeline charts or heatmaps. Checkout out the notebook [profiling_interactive_analysis.ipynb](analysis_tools/profiling_interactive_analysis.ipynb) for more details. In the following code cell we plot the total CPU and GPU utilization as timeseries charts. To visualize other metrics such as I/O, memory, network you simply need to extend the list passed to `select_dimension` and `select_events`. 
``` from smdebug.profiler.analysis.notebook_utils.training_job import TrainingJob tj = TrainingJob(training_job_name, region) tj.wait_for_sys_profiling_data_to_be_available() from smdebug.profiler.analysis.notebook_utils.timeline_charts import TimelineCharts system_metrics_reader = tj.get_systems_metrics_reader() system_metrics_reader.refresh_event_file_list() view_timeline_charts = TimelineCharts(system_metrics_reader, framework_metrics_reader=None, select_dimensions=["CPU", "GPU"], select_events=["total"]) ``` ## 3. Download Debugger Profiling Report The profiling report rule will create an html report `profiler-report.html` with a summary of builtin rules and recommenades of next steps. You can find this report in your S3 bucket. ``` rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + "/rule-output" print(f"You will find the profiler report in {rule_output_path}") ``` For more information about how to download and open the Debugger profiling report, see [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html) in the SageMaker developer guide.
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/ndwi_single.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=NAIP/ndwi_single.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi_single.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
# Problem: Sourcing Fine-Grained Mobility Data from Large Datasets ### Overview Finding fine-grained, agent-specific mobility data is very difficult to do. It is extremely important to see how an individual acts, as we can see from various clusters of the COVID-19 pandemic that we have experienced in New Zealand. It would be extremely useful to analyze the mobility of individual people, finding their clusters of movement. A more in-depth overview of the problem, with supporting literature, can be found in the accompanying report. ### The Data Mobility data will be obtained from the GeoLife dataset. This data is available for free [here](https://www.microsoft.com/en-us/download/details.aspx?id=52367). It was collected by Microsoft Asia and consists of GPS trajectories from 182 users over a period of three years. You can download it and load it through the cells that follow. This system depends on data in a PostGIS server - don't worry, this will set that up for you, you will just need a PgAdmin server running with default 'postgres' credentials. ### The Proposed Solution A user-friendly analysis system for looking at individual users of the dataset and finding patterns. This system should be approachable, web-based and capable of displaying clusters of mobility. It will also deliver key statistics on the data so that the user can validate the results they are seeing. To this end, it will be open-source. ### Structure **This Notebook consists of two parts**. The first series of cells go through the project requirement checklist, running each step individually with some maps along the way. They show how the data can be uploaded to PgAdmin etc. The final cell is a 'dashboard' that does all of the analysis of the previous steps, all contained. Code in this final cell is not very well commented as I have done this thoroughly in the cells that preceed it. 
There are also bugs that exist in the following cells that I have fixed for the final version, so please judge program functionality through the dashboard. To be clear, **sequential cells outline basic functionality and have known bugs that have been fixed for the dashboard, so please run the final cell to assess program functionality** Thanks :) ``` # import modules # used for: import psycopg2 # sql implementation from sqlalchemy import create_engine # connection to postgis import pandas as pd # tabular data handling import geopandas as gpd # tabular data handling with geometry (spatial) import folium # map creation (build on leaflet) import geoalchemy2 # support for spatial sql import matplotlib.pyplot as plt # plotting tool import numpy as np # used for getting statistics and complex numerical analyses from ipyleaflet import * # ipython leaflet extension from ipywidgets import * # ipython widgets (buttons, etc.) import glob # used for getting path names (for finding the geolife folder) from IPython.display import display # display function for inline plots etc. 
import pandas as pd # dataframes
import datetime # getting datetime from delimited data
import os # use local functions and getting system info
import psutil # getting RAM of computer for performance recommendations
import seaborn as sns # matplotlib, but fancy
import random # random number generation
from sklearn.cluster import DBSCAN # clustering algorithm
from sklearn.neighbors import NearestNeighbors # neighbourhoods used for clustering
from kneed import KneeLocator # locating knee point of clustering algorithm

# define constants
SHOW_EXCEPTIONS = True # bool for debugging exceptions
DATA_EXISTS = True # bool for not reloading data
TB_RAW = 'geolife_raw_data' # constant table name
TB_CLEAN = 'geolife_ref' # ""
TB_BEIJING = 'beijing_raw' # ""
ENGINE = create_engine('postgresql://postgres:postgres@localhost/postgres') # postgres engine
CON = ENGINE.connect() # engine connection
CRS = '4326' # coordinate reference system
BEIJING = [39.9, 116.41] # central beijing coords, for map centres
B1 = 115.779419, 39.626846 # bbox limits for beijing extent
B2 = 116.952209, 40.357010 # ""

# helper that runs a query and swallows failures, optionally echoing the
# exception when SHOW_EXCEPTIONS is enabled (used for best-effort DDL/DML)
def runsql(query):
    try:
        ENGINE.execute(query)
    except Exception as err:
        if not SHOW_EXCEPTIONS:
            return
        print(err)

# The following code was modified from code developed by HERE Technologies
# The original can be found: https://heremaps.github.io/pptk/tutorials/viewer/geolife.html
# My edits:
# -made it a class, added getters and setters (to meet assignment guidelines)
# -added customisation by user (eg.
custom file locations without constants) # -general cleanup of code, added nicer progress indicator, commented class GeoLifeHandler: def __init__(self): # apply numeric values for transport modes to save space self.mode_names = ['walk', 'bike', 'bus', 'car', 'subway','train', 'airplane', 'boat', 'run', 'motorcycle', 'taxi'] self.mode_ids = {s : i + 1 for i, s in enumerate(self.mode_names)} # create progress bar self.progress_bar = widgets.FloatProgress( value=0, min=0, max=10.0, description='Loading:', bar_style='info', style={'bar_color': '#0000FF'}, orientation='horizontal') # read plt file def read_plt(self, plt_file): points = pd.read_csv(plt_file, skiprows=6, header=None, parse_dates=[[5, 6]], infer_datetime_format=True) points.rename(inplace=True, columns={'5_6': 'time', 0: 'lat', 1: 'lon', 3: 'alt'}) # rename cols points.drop(inplace=True, columns=[2, 4]) # remove unused columns return points # read labels (where applicable) def read_labels(self, labels_file): labels = pd.read_csv(labels_file, skiprows=1, header=None, parse_dates=[[0, 1], [2, 3]], infer_datetime_format=True, delim_whitespace=True) labels.columns = ['start_time', 'end_time', 'label'] labels['label'] = [self.mode_ids[i] for i in labels['label']] # enumerate return labels # apply travel mode labels to points (enumerated to save bits) def apply_labels(self, points, labels): indices = labels['start_time'].searchsorted(points['time'], side='right') - 1 no_label = (indices < 0) | (points['time'].values >= labels['end_time'].iloc[indices].values) points['label'] = labels['label'].iloc[indices].values points['label'][no_label] = 0 # read individual user (folder is preassigned to each user) def read_user(self, user_folder): labels = None plt_files = glob.glob(os.path.join(user_folder, 'Trajectory', '*.plt')) df = pd.concat([self.read_plt(f) for f in plt_files]) labels_file = os.path.join(user_folder, 'labels.txt') if os.path.exists(labels_file): labels = self.read_labels(labels_file) 
self.apply_labels(df, labels) else: df['label'] = 0 return df # interate through all users (defined by folders in Data folder) def read_all_users(self): subfolders = os.listdir(self.folder) dfs = [] self.progress_bar.max = len(subfolders) display(self.progress_bar) for i, sf in enumerate(subfolders): #print('processing user {} of {}'.format(i + 1, len(subfolders))) self.progress_bar.value = i + 1 df = self.read_user((os.path.join(self.folder,sf))) df['user'] = int(sf) dfs.append(df) print('Load Complete') self.geolife_df = pd.concat(dfs) # set folder location def set_location(self, file): self.folder = file # get dataframe def get_df(self): return self.geolife_df # function for submitting file location # starts the reading of all users, saving to geodataframe def bt_submitevent(b): if os.path.exists(file_options.value): b.disabled = True handler.set_location(file_options.value) handler.read_all_users() else: print('Data file not found. Please try again...') # submit button for selecting the option, calling the above function when selected bt_submit = widgets.Button(description='Select') bt_submit.on_click(bt_submitevent) # list of file options file_options = widgets.Dropdown( options=[(os.path.join(os.getcwd(), 'geolife_trajectories\Data')), # default zip file location (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop\Data')), # desktop folder (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Downloads\Data'))], # downloads folder disabled=False, layout={'width': '70%'}) # display these widgets, with title display(VBox([HTML('<h4>GeoLife File Location</h4>'), HBox([file_options, bt_submit])])) # create instance of handler class handler = GeoLifeHandler() # try and get the dataframe information # good test if the dataframe exists. 
if it doesn't, assume it is already available on server (see constants) try: geolife_raw = handler.get_df() display(geolife_raw.count()) display(geolife_raw.head(10)) geolife_raw = geolife_raw.head(10) except: print('Data either not yet extracted, or already available on server') # class for uploading to postgis and setting data limitations based on memory available class PostGISUploader: def __init__(self): # use constants for table names etc. self.table_rawdata = TB_RAW self.table_cleandata = TB_CLEAN self.crs = CRS # buttons for setting limitations on heatmap self.buttons = [ widgets.Button(description='100,000'), widgets.Button(description='1,000,000'), widgets.Button(description='10,000,000'), widgets.Button(description='No Limitation') ] # assign the set_limit function to all buttons (will change output based on value) for button in self.buttons: button.on_click(self.set_limit) self.limit = 'LIMIT 100' # default limit # get ram of computer self.pc_ram = round(psutil.virtual_memory().total / 1073741824, 1) # upload data to postgis. THIS TAKES A VERY LONG TIME (mine took ~14 hours) def upload(self, geolife_df): # create 'raw data' table. this will include all attributes of the dataset runsql('DROP TABLE IF EXISTS {}'.format(self.table_rawdata)) runsql(('CREATE TABLE {}' '(gid serial PRIMARY KEY, time timestamp, lat float, lon float, alt float, label int, user_id int)' .format(self.table_rawdata))) # data must be inserted row by row. using 'to_postgis' will crash as it uses too much memory for row in geolife_df.iterrows(): cur_row = row[1] runsql(("INSERT INTO {} (time, lat, lon, alt, label, user_id) VALUES ('{}', {}, {}, {}, {}, {})" .format(self.table_rawdata, cur_row['time'], cur_row['lat'], cur_row['lon'], cur_row['alt'], cur_row['label'], cur_row['user']))) # create 'clean' table. 
will treat the previous table as a backup # this table has a geometry ref instead of lat/lon runsql('DROP TABLE IF EXISTS {}'.format(self.table_cleandata)) runsql(('CREATE TABLE {} AS SELECT gid, time, st_setsrid(st_makepoint(lon, lat), {}) ' 'as geom, label, user_id FROM {}'.format(self.table_cleandata, self.crs, self.table_rawdata))) # disable buttons, get limitation based on the button value (with some string formatting) def set_limit(self, b): for button in self.buttons: button.disabled = True self.limit = b.description.replace(',', '') if self.limit == 'No Limitation': self.limit = '' else: self.limit = 'LIMIT ' + self.limit # get a message based on the amount of RAM your computer has def get_limitoptions(self): if self.pc_ram < 4: message = 'This is quite low (no offense). Recommended: 100,000 row limitation' elif 4 < self.pc_ram < 8: message = 'This is acceptable. Recommended: 1,000,000 row limitation' elif 8 < self.pc_ram < 16: message = 'This is good, but still limited. Recommended: 10,000,000 row limitation' else: message = 'You can handle anything. Try going with the whole dataset!' return message # return buttons def get_buttons(self): return self.buttons # return limit suffix def get_limit(self): return self.limit # return pc ram def get_ram(self): return self.pc_ram # create object instance. this is needed even if data is uploaded already to get limitation and ram etc. 
uploader = PostGISUploader()

# start the data upload process if the data has not already been uploaded (constant = false)
if not DATA_EXISTS:
    uploader.upload(geolife_raw)

# class for dealing with folium maps
# NOTE(review): despite the name, every call here is ipyleaflet (Map, Layout, ScaleControl),
# not the folium package -- confirm whether the name is historical before relying on it
class FoliumCreator:
    # set default values: zoom level, widget height, map centre (Beijing) and scale-bar corner
    def __init__(self, zoom=10, size='600px', center=BEIJING, scale_pos='topleft'):
        self.zoom = zoom
        self.size = size
        self.center = center # using American english spelling as that's what leaflet uses, not bc I like it :(
        self.layout = Layout(height=self.size)
        self.scale_pos = scale_pos

    # set map layout
    def set_layout(self, layout):
        self.layout = layout

    # set zoom level (takes effect on the next get_map call; existing maps are unchanged)
    def set_zoom(self, level):
        self.zoom = level

    # return a freshly constructed map object with a scale control already attached
    def get_map(self):
        m = Map(center=self.center, zoom=self.zoom, layout=self.layout, basemap=basemaps.Stamen.Terrain)
        self.set_control(m, ScaleControl(position=self.scale_pos))
        return m

    # set control (eg. scale bar, split-map slider)
    def set_control(self, instance, control):
        instance.add_control(control)

    # set new layer (eg. heatmap, ant path)
    def set_layer(self, instance, layer):
        instance.add_layer(layer)
```

### Showing our Area of Interest A map of Beijing without any data. Basemaps show the topography and cityscape. We have chosen Beijing as most of the GeoLife data has telemetry from this city.

```
'''
MAP 1: Area of Interest
-no context, just area
-zoomed into centre based on constants
-uses two basemaps to show urban and environment
'''
# create mapcontroller instance, return a map object
mapcontroller = FoliumCreator()
map1 = mapcontroller.get_map()

# add basemap layers
right_layer = basemap_to_tiles(basemaps.OpenTopoMap) # OpenTopMap
left_layer = basemap_to_tiles(basemaps.Stamen.Terrain) # Stamen Terrain (my fav)

# add splitmap controller to map (draggable slider comparing the two basemaps)
split_control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
mapcontroller.set_control(map1, split_control)

# show map
display(map1)
```

### Mapping the Raw Data Now that we have uploaded all of our data to a server we can safely manipulate it. Again, we will focus on Beijing, China as this is where the majority of the data originate from. We will create a table that grabs all points that are within our defined 'bounding box'. Then, we can produce a heatmap of these points, coloured by density of points. This will show where most people moved around in Beijing on the dataset (or at least, where their phones reported location from the most). Note that despite limiting the extent, the data is still over 24 million rows, so this may take some time to process!

```
# print information and recommendation based on ram
print(('This dataset is over 24 million rows. It requires high amounts of memory to process in its entirety. '
       'Detected {} gigabytes, not all of which will be dedicated to the interpreter. {}. Of course, this is a free'
       ' world, do what you want - but you have been warned.'
       .format(uploader.get_ram(), uploader.get_limitoptions())))

# add buttons from uploader object, display buttons
buttons = uploader.get_buttons()
display(VBox([HTML('<h4>Please select a data row limitation</h4>'),
              HBox([buttons[0], buttons[1], buttons[2], buttons[3]])]))

'''
MAP 2: Movement Heatmap
-focus on Beijing, create table of points within that extent
-saves table in cache, which will use a fair bit of storage on your device (depending on limitation)
'''
# create beijing table based on constant
if not DATA_EXISTS:
    # drop if exists, just in case
    runsql('DROP TABLE IF EXISTS {}'.format(TB_BEIJING))
    print('Existing table dropped (if it existed). Creating Beijing table. This can take several minutes...')
    # selects beijing area from a makeenvelope function (clip to extent essentially);
    # B1/B2 are the (lon, lat) corners of the bounding box, CRS the srid
    runsql('CREATE TABLE {} AS SELECT * FROM {} WHERE geom && st_makeenvelope ({}, {}, {}, {}, {})'
           .format(TB_BEIJING, TB_CLEAN, B1[0], B1[1], B2[0], B2[1], CRS))
    print('Table created.') # giving info like this as it can take a while, so good to show something is happening

# get beijing table
print('Querying server. 
This can take a while...') query = ('SELECT * FROM {} {}'.format(TB_BEIJING, uploader.get_limit())) # get geodataframe for heatmap (all beijing) gdf = gpd.GeoDataFrame.from_postgis(query, CON) print(gdf.count()) # get map, changing zoom mapcontroller.set_zoom(10) map2 = mapcontroller.get_map() # convert geometry from geodataframe into a list so that it can be used for ipyleaflet heatmap print('GeoDataFrame created. Converting values to produce heatmap...') geom_list = [(x, y) for x, y in zip(gdf['geom'].y, gdf['geom'].x)] heatmap = Heatmap(locations=geom_list, radius=8) # add heatmap to map object, display map map2.add_layer(heatmap) display(map2) ``` ### Finding Users We can see that, even when restricting to our city of interest, the dataset is far too big to handle. Let's restrict it further and focus in on a single user to have a look at the fine-grained patterns. Initially, I wrote an algorithm to find trips based on the label of transport changing, which worked by: - iterating through every user, grabbing all of their points from the server - checking if they have labels assigned to their travels - calculating their unique 'trips', assigned by the amount of times the label assigned to these trips changes, eg. walking for 10 minutes followed by a taxi ride will be defined as two trips - getting a user who has more than a defined minimum trip count, then using binary search to find the user who has the lowest amount of trips above this minimum However, this method has several flaws. As mentioned, not all users have labels assigned for their travels. Another is that two modes of transport may be considered part of a single trip. Instead, we can focus on user-level data, rather than trip-level data. We can focus on clusters of points rather than alternating transport methods, thus removing the reliance on the existence of labels (the algorithm I wrote found that over 70% of users have to labels). 
Overall, we can see that user-level data will be more effective than trip-level data as a way to handle the data. First, let's select a user to focus on. We will write the analysis in a way that it will work on any user that exists in the dataset. ``` # class for each individual user # instead of creating a new class instance each time a new user is chosen, this function can update based on user id # handles all the displays for interacting with users, as well as giving information to analysis tools class UserObject: # initialize with some defaults def __init__(self, dashboard_mode=False): self.id = -1 self.id_count = 181 self.dashboard_mode = dashboard_mode # depends on whether used in final version or inline # submit user button self.bt_submit_user = widgets.Button(description='Select') self.bt_submit_user.on_click(self.submit_user) # randomize user button self.bt_random_user = widgets.Button(description='Randomize') self.bt_random_user.on_click(self.randomize_user) # get random id for the first user self.first_user_id = self.get_random_id() # dropdown for all users self.user_options = widgets.Dropdown( value=self.first_user_id, options=self.get_all_ids(), disabled=False, layout={'width': '30%'}) # observed function for change of dropdown self.user_options.observe(self.user_change, 'value') # outputs self.dp_userinfo = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '360px'}) self.clusterbox = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '200px', 'min_width': '200px'}) # get user info for first user info self.get_user_info(self.first_user_id) # disables submit button for dashboard mode if self.dashboard_mode: self.bt_submit_user.disabled = True # display all widgets def display(self): display(HBox([HTML('<b>User: '), self.user_options, self.bt_random_user, self.bt_submit_user])) display(HBox([self.dp_userinfo, widgets.Output(layout={'min_width': '75px'}), self.clusterbox])) # submit user (non-dashboard). 
disables buttons, sets id based on selected value def submit_user(self, b): # 'b' acts as the button event/container b.disabled = True self.bt_random_user.disabled = True self.user_options.disabled = True self.set_id(self.user_options.value) print('User {} selected!'.format(self.get_id())) # get random user def randomize_user(self, b): self.user_options.value = self.get_random_id() # change user to value in dropdown (observing dropdown widget) def user_change(self, value): self.get_user_info(value.new) if self.dashboard_mode: self.set_id = value.new # set id def set_id(self, new_id): self.id = new_id # return id def get_id(self): return self.id # set what to display in 'clusterbox' (print output on the right to show clustering info) def set_clusterbox(self, content): with self.clusterbox: print(content) # get all possible id values def get_all_ids(self): return [i for i in range(0, self.id_count)] # get random id (random number generation) def get_random_id(self): return random.randint(0, self.id_count) # get user info (through db queries) def get_user_info(self, test_id): # show loading process self.bt_random_user.description = 'Loading...' 
self.bt_random_user.disabled = True # get info info = CON.execute('select count(*), count(distinct(label)), min(time), max(time) from {} where user_id = {}' .format(TB_BEIJING, test_id)).first() # clear any existing info self.dp_userinfo.clear_output() # print formatted summary based on query with self.dp_userinfo: print('User {} Summary\n--------------------------------------------'.format(test_id)) print(('Data points: {}\nUnique Travel Methods: {}\nData Begin: {}' '\nData End: {}'.format(info[0], info[1], info[2], info[3]))) # show loading process self.bt_random_user.description = 'Randomize' self.bt_random_user.disabled = False # create user object, display widgets user = UserObject() user.display() ``` The code below looks at individual user movement and filters these movements to make this fine-grained data easier to handle. We can look at their mobility in a less processing-intensive way by using the function ST_REMOVEREPEATEDPOINTS which does exactly what the name suggests. Repeated points are defined by a tolerance level (essentially a buffer around each point): ``` # get user information. query is based on collection of geometry (merge) gdf_raw = gpd.GeoDataFrame.from_postgis((('SELECT st_collect(geom) as all_geom, st_npoints(st_collect(geom)) ' 'as point_count from {} where user_id = {}') .format(TB_BEIJING, user.get_id())), CON, geom_col='all_geom') # give up if returns as none, giving some error info gdf_raw_n = str(gdf_raw['point_count'][0]) if gdf_raw_n == 'None': print("No points found. Please ensure you have clicked 'Select' above") else: print('Raw point count: ' + gdf_raw_n) # get simplified user information. 
removes repeated points as a simplification of the total user's mobility gdf_simple = gpd.GeoDataFrame.from_postgis((('SELECT st_removerepeatedpoints(st_collect(geom)) as simplified_geom, ' 'st_npoints(st_removerepeatedpoints(st_collect(geom), 0.001)) as point_count from {} where user_id = {}') .format(TB_BEIJING, user.get_id())), CON, geom_col='simplified_geom') gdf_simple_n = str(gdf_simple['point_count'][0]) print('Simplified point count: ' + gdf_simple_n) # plot output fig, axes = plt.subplots(1, 2, figsize=(15, 10)) # raw mobility trajectories axes[0].set_title('Original Trip Data for User {} (n={})'.format(user.get_id(), gdf_raw_n)) gdf_raw.plot(ax=axes[0]) axes[0].set_xlabel('Longitude') axes[0].set_ylabel('Latitude') # simplified mobility trajectories axes[1].set_title('Simplified Trip Data for User {} (n={})'.format(user.get_id(), gdf_simple_n)) gdf_simple.plot(ax=axes[1]) axes[1].set_xlabel('Longitude') axes[1].set_ylabel('Latitude') # show plots plt.show() ``` #### Validation of Simplification These two plots look very similar, but we yet can't be sure. We can confirm by testing that the simplified analysis falls entirely within the area of the raw dataset. We will also plot the two datasets together to make sure the movement trends are the same. This takes a bit of time as we need to perform the simplification, split the geometery into latitude and longitude so we can plot it and perform our ST_CONTAINS. Note that the plots will show some distortion as they are not assigned a coordinate reference system. For the purposes of this analysis, this does not matter as we are only concerned about their relative (as opposed to absolute) displacement. 
```
# get raw data, split as x and y for plotting
# NOTE(review): st_x is the X coordinate (longitude) and st_y is Y (latitude), so the 'lat'
# alias actually holds longitude and 'lon' holds latitude. Downstream plotting compensates
# for the swap, so renaming here alone would break later cells -- confirm before changing.
gdf_raw = gpd.GeoDataFrame.from_postgis((('SELECT st_x((st_dumppoints(st_collect(geom))).geom) as lat, '
                                          'st_y((st_dumppoints(st_collect(geom))).geom) as lon, (st_dumppoints(st_collect(geom))).geom as geom '
                                          'FROM {} WHERE user_id = {}'.format(TB_BEIJING, user.get_id()))), CON)

# get simplified data, also split into x and y (same swapped aliasing as above)
gdf_simple = gpd.GeoDataFrame.from_postgis((('SELECT '
                                             'st_x((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lat, '
                                             'st_y((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lon, '
                                             '(st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom as geom '
                                             'FROM {} WHERE user_id = {}'.format(TB_BEIJING, user.get_id()))), CON)

# tests if the simplified data is completely contained by the raw data
contains = CON.execute(('SELECT st_contains(st_collect(geom), st_removerepeatedpoints(st_collect(geom), 0.001)) '
                        'FROM {} where user_id = {}'.format(TB_BEIJING, user.get_id()))).first()[0]

# display some information. gives validation based on what the above query returns
print(('\nNote that size of points is offset for aesthetics and has no meaning.'
       'The ST_CONTAINS was run on the raw data. Also note that the difference in n between this and the previous '
       'plot is due to rounding and is inconsequential to our analysis\n'))
if contains:
    print('Raw data completely contains simplified data.')
else:
    print('Raw data does not contain simplified data.')

# create new plot
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111)
# NOTE(review): assigning .figsize after construction appears to have no effect
# (figsize is a constructor argument) -- confirm before removing
fig.figsize = (10, 10)

# plot raw and simplified datasets atop one another
ax.scatter(gdf_raw['lat'], gdf_raw['lon'], s=50, c='black', marker="o", label='Raw (n={})'
           .format(gdf_raw.count()[0])) # larger dots to show difference
ax.scatter(gdf_simple['lat'], gdf_simple['lon'], s=2, c='orange', marker="o", label='Simplified (n={})'
           .format(gdf_simple.count()[0]))
ax.set_ylabel('Latitude')
ax.set_xlabel('Longitude')
plt.legend(loc='lower left');

# show plot
plt.show()
```

### Pattern of User Movements We now have a set of telemetry from an individual user, compressed and validated to be more manageable. Let's do something with this data! #### Density-Based Spatial Clustering of Applications with Noise (DBSCAN) DBSCAN will help us find clusters in the data. These clusters will unveil information about the mobility of the user, showing patterns of movement that are clustered together which we can build inferences from. The sklearn package has all the tools we need to run a DBSCAN analysis, starting with calculating the distance of nearest-neighbour points. We can use these distances to find the 'knee point' on the 'elbow diagram' - the point where the distances of neighbours greatly increases (almost) asymptotically. This 'knee point' serves as the epsilon value in the DBSCAN analysis, where we can then classify points into their respective clusters. 
```
# set sample parameter, get list of geometries, create figures for output
# (the 'lat'/'lon' columns come from the swapped st_x/st_y aliases upstream -- for
# nearest-neighbour distances and clustering the ordering does not matter)
samples = 20
geom_list = [(x, y) for x, y in zip(gdf_simple['lat'], gdf_simple['lon'])]
fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# get nearest neighbours; samples + 1 because each point is its own nearest neighbour
nn = NearestNeighbors(n_neighbors=samples + 1)
neighbors = nn.fit(geom_list)
distances, indices = neighbors.kneighbors(geom_list)
# keep only the distance to the samples-th neighbour, sorted ascending for the elbow plot
distances = np.sort(distances[:,samples], axis=0)

# plot nearest neighbours
axes[0].set_title('Nearest Neighbours Plot on User {} Mobility'.format(user.get_id()))
axes[0].plot(distances)
axes[0].set_xlabel('Points')
axes[0].set_ylabel('Distances')

# calculate knee point of the sorted-distance curve (used as the DBSCAN epsilon below)
i = np.arange(len(distances))
knee = KneeLocator(i, distances, S=1, curve='convex', direction='increasing', interp_method='polynomial')

# plot nearest neighbours with knee point
axes[1].set_title('Knee Point of Nearest Neighbours for User {} Mobility'.format(user.get_id()))
axes[1].plot(distances)
axes[1].axvline(x=knee.knee,color='gray',linestyle='--')
axes[1].text((axes[1].get_xlim()[1] * 0.5), (axes[1].get_ylim()[1] * 0.5), 'knee={}'.format(knee.knee))
axes[1].set_xlabel('Points')
axes[1].set_ylabel('Distances')

# create dbscan instance; eps is the distance value at the knee index
db = DBSCAN(eps=distances[knee.knee], min_samples=samples).fit(geom_list)

# get labels (clustered; -1 marks noise)
labels = db.labels_

# plot figure as scatterplot (seaborn), one hue per cluster label
fig = plt.figure(figsize=(8, 8))
sns_scatter = sns.scatterplot(x=gdf_simple['lat'], y=gdf_simple['lon'], marker="o",
                              hue=["{cluster}".format(cluster = 'Cluster ' + str(cur_label) if cur_label != -1 else 'Noise')
                                   for cur_label in labels])
sns_scatter.set(xlabel='Latitude', ylabel='Longitude',
                title='Clusters in User {} Mobility using DBSCAN\n(where -1 is noise)'.format(user.get_id()))

# show plots
plt.show()
```

#### Analyzing Mobility Patterns/Clusters Of course, the simplification will greatly impact the reliability of the clustering. 
However, it is not viable to load the entire dataset for every user in-situ for clustering as they are far too large. This is a solution that compromises processing time and quality of output. We now have clustered mobility patterns for a specific user. What information can we derive from this? First we will need to consolidate everything about the specific user into one table: ``` # create dataframe for manipulation, add clustering labels gdf_user = gdf_simple gdf_user['label'] = labels #gdf_user['time'] = 'datetime' # count with noise total_count = gdf_user.count()[0] # get locations of noise noise_index = gdf_user[gdf_user['label'] == -1].index # get counts, drop noise gdf_user_clean = gdf_user gdf_user_clean.drop(noise_index, inplace=True) clean_count = gdf_user_clean.count()[0] noise_count = total_count - clean_count # print info of noise, clusters, counts before and after dropping noise print('Out of {} points, {} were detected as noise ({}%). With noise removed, {} points remain with {} cluster(s)' .format(total_count, noise_count, round(noise_count / total_count * 100, 2), clean_count, len(gdf_user['label'].unique()))) ``` #### Visualizing Mobility Patterns We now have a clean, simplifed dataset. GPS trajectories will be plotted along paths so that we can see exactly where the user moves within clusters. 
``` # Antpath plot # here we can see the user's mozement along a moving path colours = ['red', 'green', 'blue', 'orange', 'purple', 'pink', 'brown', 'black', 'cyan', 'gold', 'white', 'crimson', 'olive'] mapcontroller.set_zoom(9) map2 = mapcontroller.get_map() by_cluster = gdf_user_clean.groupby('label') by_cluster = dict(list(by_cluster)) for cluster in by_cluster.keys(): current_gdf = by_cluster[cluster] geom_list = [(x, y) for x, y in zip(current_gdf['lon'], current_gdf['lat'])] ant_path = AntPath(locations=geom_list, dash_array=[1, 10], delay=3000, color=colours[cluster], pulse_color='black') mapcontroller.set_layer(map2, ant_path) display(map2) ``` ### User Interface We have now outlined the methods required to get fine-grained mobility analysis on this GeoLife dataset. We will finish by combining these methods into a single interface so that those less familiar with GISytems can access this data. This following cell, aside from the collation of data and the requirement of third-party packages, should function independently from the rest of the Notebook. This means it will expect an existing dataset uploaded to PostGIS, which, if you have run the previous cells, should be satisfied. 
``` ''' GeoLife Mobility Dashboard -allows the selection of any user, displaying their DBSCAN clusters and trajectories -based on code above, with a lot of class modification (copied so that it can be run independently)) -for in-depth commenting, see the cells above -can be run independently (along with imports) ''' # import modules # used for: import psycopg2 # sql implementation from sqlalchemy import create_engine # connection to postgis import pandas as pd # tabular data handling import geopandas as gpd # tabular data handling with geometry (spatial) import folium # map creation (build on leaflet) import geoalchemy2 # support for spatial sql import matplotlib.pyplot as plt # plotting tool import numpy as np # used for getting statistics and complex numerical analyses from ipyleaflet import * # ipython leaflet extension from ipywidgets import * # ipython widgets (buttons, etc.) import glob # used for getting path names (for finding the geolife folder) from IPython.display import display # display function for inline plots etc. 
import pandas as pd # dataframes import datetime # getting datetime from delimited data import os # use local functions and getting system info import psutil # getting RAM of computer for performance recommendations import seaborn as sns # matplotlib, but fancy import random # random number generation from sklearn.cluster import DBSCAN # clustering algorithm from sklearn.neighbors import NearestNeighbors # neigbourhoods used for clustering from kneed import KneeLocator # locating knee point of clustering algorithm DATA_EXISTS = True SHOW_EXCEPTIONS = True TB_RAW = 'geolife_raw_data' TB_CLEAN = 'geolife_ref' ENGINE = create_engine('postgresql://postgres:postgres@localhost/postgres') CON = ENGINE.connect() CRS = '4326' BEIJING = [39.9, 116.41] TB_BEIJING = 'beijing_raw' B1 = 115.779419, 39.626846 B2 = 116.952209, 40.357010 COLOURS = ['green', 'blue', 'orange', 'pink', 'yellow', 'purple', 'red', 'brown', 'gray', 'aqua', 'peru', 'gold', 'crimson', 'olive', 'white'] def runsql(query): try: ENGINE.execute(query) except Exception as exep: if SHOW_EXCEPTIONS: print(exep) class GeoLifeHandler: def __init__(self): # apply numeric values for transport modes to save space self.mode_names = ['walk', 'bike', 'bus', 'car', 'subway','train', 'airplane', 'boat', 'run', 'motorcycle', 'taxi'] self.mode_ids = {s : i + 1 for i, s in enumerate(self.mode_names)} self.progress_bar = widgets.FloatProgress( value=0, min=0, max=10.0, description='Loading:', bar_style='info', style={'bar_color': '#0000FF'}, orientation='horizontal') self.folder = 'undefined' self.geolife_df = [] self.limit = '' # read plt file def read_plt(self, plt_file): points = pd.read_csv(plt_file, skiprows=6, header=None, parse_dates=[[5, 6]], infer_datetime_format=True) points.rename(inplace=True, columns={'5_6': 'time', 0: 'lat', 1: 'lon', 3: 'alt'}) # rename cols points.drop(inplace=True, columns=[2, 4]) # remove unused columns return points # read labels (where applicable) def read_labels(self, labels_file): labels 
= pd.read_csv(labels_file, skiprows=1, header=None, parse_dates=[[0, 1], [2, 3]], infer_datetime_format=True, delim_whitespace=True) labels.columns = ['start_time', 'end_time', 'label'] labels['label'] = [self.mode_ids[i] for i in labels['label']] # enumerate return labels # apply travel mode labels to points (enumerated to save bits) def apply_labels(self, points, labels): indices = labels['start_time'].searchsorted(points['time'], side='right') - 1 no_label = (indices < 0) | (points['time'].values >= labels['end_time'].iloc[indices].values) points['label'] = labels['label'].iloc[indices].values points['label'][no_label] = 0 # read individual user (folder is preassigned to each user) def read_user(self, user_folder): labels = None plt_files = glob.glob(os.path.join(user_folder, 'Trajectory', '*.plt')) df = pd.concat([self.read_plt(f) for f in plt_files]) labels_file = os.path.join(user_folder, 'labels.txt') if os.path.exists(labels_file): labels = self.read_labels(labels_file) self.apply_labels(df, labels) else: df['label'] = 0 return df # interate through all users (defined by folders in Data folder) def read_all_users(self): subfolders = os.listdir(self.folder) dfs = [] self.progress_bar.max = len(subfolders) display(self.progress_bar) for i, sf in enumerate(subfolders): #print('processing user {} of {}'.format(i + 1, len(subfolders))) self.progress_bar.value = i + 1 df = self.read_user((os.path.join(self.folder,sf))) df['user'] = int(sf) dfs.append(df) print('Load Complete') self.geolife_df = pd.concat(dfs) def set_location(self, file): self.folder = file def get_df(self): return self.geolife_df class FoliumCreator: def __init__(self, zoom=10, size='600px', center=BEIJING, scale_pos='topleft'): self.zoom = zoom self.size = size self.center = center # using American english spelling as that's what leaflet uses, not bc I like it :( self.layout = Layout(height=self.size) self.scale_pos = scale_pos def set_layout(self, layout): self.layout = layout def 
set_zoom(self, level): self.zoom = level def get_map(self): m = Map(center=self.center, zoom=self.zoom, layout=self.layout, basemap=basemaps.Stamen.Terrain) self.set_control(m, ScaleControl(position=self.scale_pos)) return m def set_control(self, instance, control): instance.add_control(control) def set_layer(self, instance, layer): instance.add_layer(layer) class UserObject: def __init__(self, scanner, panel_plots, folium_map, mapcontrol_object, dashboard_mode=False): self.id = -1 self.trips = {} self.id_count = 182 self.dashboard_mode = dashboard_mode self.scan_object = scanner self.panel_plots = panel_plots self.folium_map = folium_map self.mapper = mapcontrol_object self.bt_submit_user = widgets.Button(description='Select') self.bt_submit_user.on_click(self.submit_user) self.bt_random_user = widgets.Button(description='Randomize') self.bt_random_user.on_click(self.randomize_user) self.first_user_id = self.get_random_id() self.user_options = widgets.Dropdown( value=self.first_user_id, options=self.get_all_ids(), disabled=False, layout={'width': '30%'}) self.user_options.observe(self.user_change, 'value') self.dp_userinfo = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '360px'}) self.clusterbox = widgets.Output(layout={'border': 'solid black 1px', 'max_width': '400px', 'min_width': '400px'}) self.get_user_info(self.first_user_id) self.scan_object.reset(self.gdf, self, self.panel_plots) self.scan_object.show_plot() if self.dashboard_mode: self.bt_submit_user.disabled = True def display(self): display(HBox([HTML('<b>User: '), self.user_options, self.bt_random_user, self.bt_submit_user])) display(HBox([self.dp_userinfo, widgets.Output(layout={'min_width': '75px'}), self.clusterbox])) def submit_user(self, b): b.disabled = True self.bt_random_user.disabled = True self.user_options.disabled = True self.set_id(self.user_options.value) print('User {} selected!'.format(self.get_id())) def randomize_user(self, b): self.user_options.value = 
self.get_random_id() def user_change(self, value): self.get_user_info(value.new) if self.dashboard_mode: try: self.set_id(value.new) self.scan_object.reset(self.gdf, self, self.panel_plots) self.clean_gdf() self.get_antline() self.scan_object.show_plot() except Exception as e: print('ERROR during processing. Exception:') print(e) def set_id(self, new_id): self.id = new_id def get_id(self): return self.id # currently unused def set_clusterbox(self, content): with self.clusterbox: print(content) def get_all_ids(self): return [i for i in range(0, self.id_count)] def get_random_id(self): return random.randint(0, self.id_count - 1) def get_user_info(self, test_id): self.bt_random_user.description = 'Loading...' self.bt_random_user.disabled = True info = CON.execute('select count(*), count(distinct(label)), min(time), max(time) from {} where user_id = {}' .format(TB_BEIJING, test_id)).first() gdf_simplified = gpd.GeoDataFrame.from_postgis((('SELECT ' 'st_x((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lat, ' 'st_y((st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom) as lon, ' '(st_dumppoints(st_removerepeatedpoints(st_collect(geom), 0.001))).geom as geom ' 'FROM {} WHERE user_id = {}'.format(TB_BEIJING, test_id))), CON) self.gdf = gdf_simplified self.dp_userinfo.clear_output() with self.dp_userinfo: print('User {} Summary\n--------------------------------------------'.format(test_id)) print(('Data points: {}\nUnique Travel Methods: {}\nData Begin: {}' '\nData End: {}'.format(info[0], info[1], info[2], info[3]))) self.bt_random_user.description = 'Randomize' self.bt_random_user.disabled = False def clean_gdf(self): self.gdf['label'] = self.scan_object.get_labels() self.gdf['time'] = 'datetime' total_count = self.gdf.count()[0] noise_index = self.gdf[self.gdf['label'] == -1].index self.cleaned_gdf = self.gdf self.cleaned_gdf.drop(noise_index, inplace=True) clean_count = self.cleaned_gdf.count()[0] noise_count = total_count - 
clean_count self.clusterbox.clear_output() with self.clusterbox: print(('Out of {} points, {} were detected as noise ({}%). With noise removed, {} points remain ' 'with {} cluster(s)\n\nNote that cluster colours between plots are not constant') .format(total_count, noise_count, round(noise_count / total_count * 100, 2), clean_count, len(self.gdf['label'].unique()))) def get_antline(self): self.mapper.set_zoom(9) self.folium_map.clear_layers() self.folium_map.add_layer(basemaps.Stamen.Terrain) by_cluster = self.cleaned_gdf.groupby('label') by_cluster = dict(list(by_cluster)) for cluster in by_cluster.keys(): current_gdf = by_cluster[cluster] current_geom_list = [(x, y) for x, y in zip(current_gdf['lon'], current_gdf['lat'])] ant_path = AntPath(locations=current_geom_list, dash_array=[1, 10], delay=3000, color=COLOURS[cluster], pulse_color='black') self.mapper.set_layer(self.folium_map, ant_path) class DBSCANner: def __init__(self): self.backup_knee = 1 def reset(self, gdf, user, sidebar, samples=20): self.samples = samples self.gdf = gdf self.geom_list = [(x, y) for x, y in zip(self.gdf['lat'], self.gdf['lon'])] self.panel = sidebar self.user = user self.panel.clear_output() with self.panel: self.fig, self.axes = plt.subplots(2, 1, figsize=(4, 8)) self.distances = self.get_distances() self.knee = self.get_knee() self.labels = self.get_DBSCAN() self.gdf['cluster'] = self.labels self.plot_knee() self.plot_clusters() def get_distances(self): # get nearest neigbours # Americanized spelling as this is the name of the function, not because I like it try: if (len(self.geom_list) < self.samples + 1): self.samples = int(round(len(self.geom_list) / 2, 0)) nn = NearestNeighbors(n_neighbors=self.samples + 1) neighbors = nn.fit(self.geom_list) distances, indices = neighbors.kneighbors(self.geom_list) distances = np.sort(distances[:, self.samples], axis=0) return distances except Exception as e: print('\nERROR during processing. 
Exception:') print(e) def get_knee(self): try: # calculate knee point i = np.arange(len(self.distances)) knee = KneeLocator(i, self.distances, S=1, curve='convex', direction='increasing', interp_method='polynomial') # make sure knee returns as number if (isinstance(knee.knee, int)): knee_val = self.backup_knee else: knee_val = knee.knee # save knee point return knee_val # lots of errors/exceptions occur on 1 or 2 users with very low n. handling a single one and returning # is not feasible. return knee point of 1 instead, better than crashing except: return 1 def get_DBSCAN(self): try: # calculate DBSCAN clusters (classify) dbscan = DBSCAN(eps=self.distances[self.knee], min_samples=self.samples).fit(self.geom_list) # get labels of clusters, return them as list labels = dbscan.labels_ return labels except Exception as e: print('\nERROR during processing. Exception:') print(e) def plot_knee(self): with self.panel: try: self.axes[0].set_title('Knee Point of Nearest Neighbours for User {} Mobility'.format(self.user.get_id())) self.axes[0].plot(self.distances) self.axes[0].axvline(x=self.knee, color='gray', linestyle='--') #self.axes[0].text((self.knee, (0.5 * self.axes[0].get_ylim()[0])), # 'knee={}'.format(self.knee)) self.axes[0].set_xlabel('Points') self.axes[0].set_ylabel('Distances') except Exception as e: print('ERROR during processing. 
Exception:') print(e) def plot_clusters(self): with self.panel: '''scatter_plot = sns.scatterplot(x=self.gdf['lat'], y=self.gdf['lon'], marker="o", ax=self.axes[1], hue=["{cluster}".format(cluster = 'Cluster ' + str(cur_label) if cur_label != -1 else 'Noise') for cur_label in self.gdf['labels']])''' scatter_plot = sns.scatterplot(x=self.gdf['lat'], y=self.gdf['lon'], marker="o", ax=self.axes[1], palette='Set2', hue=self.gdf['cluster'], legend='full') scatter_plot.set(xlabel='Latitude', ylabel='Longitude', title='Clusters in User {} Mobility using DBSCAN\n(where -1 is noise)' .format(self.user.get_id())) def get_labels(self): return self.labels def show_plot(self): with self.panel: plt.tight_layout() plt.show() if not DATA_EXISTS: bt_submit = widgets.Button(description='Select') file_options = widgets.Dropdown( options=[(os.path.join(os.getcwd(), 'geolife_trajectories\Data')), (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop\Data')), (os.path.join(os.path.join(os.environ['USERPROFILE']), 'Downloads\Data'))], disabled=False, layout={'width': '70%'}) display(VBox([HTML('<h4>GeoLife File Location</h4>'), HBox([file_options, bt_submit])])) def bt_submitevent(b): if os.path.exists(file_options.value): b.disabled = True handler.set_location(file_options.value) handler.read_all_users() else: print('Data file not found. 
Please try again...') handler = GeoLifeHandler() bt_submit.on_click(bt_submitevent) try: geolife_raw = handler.get_df() display(geolife_raw.count()) display(geolife_raw.head(10)) geolife_raw = geolife_raw.head(10) except: print('Data either not yet extracted, or already available on server') uploader = PostGISUploader() uploader.upload(geolife_raw) mapcontroller = FoliumCreator() map_main = mapcontroller.get_map() panel_plots = widgets.Output() panel_top = HBox([map_main, panel_plots]) display(panel_top) scanner = DBSCANner() user = UserObject(scanner, panel_plots, map_main, mapcontroller, dashboard_mode=True) #scanner.reset(gdf_simple, user, panel_plots) user.display() # TO DO # -get first user working # -fix issues # -get time column working # -sync colours of cluster plot and map ```
github_jupyter
``` import numpy as np from scipy.spatial import ConvexHull import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import mpl_toolkits.mplot3d as a3 import LassoHull ##### Methods for Evalution ##### import cvxopt def project_to_hull(beta, B): m = B.shape[1] P = cvxopt.matrix(B.T.dot(B)) q = cvxopt.matrix(-B.T.dot(beta)) A = cvxopt.matrix(np.ones((1,m))) b = cvxopt.matrix(np.array([1.0])) G = cvxopt.matrix(-np.identity(m)) h = cvxopt.matrix(np.zeros(m)) cvxopt.solvers.options['show_progress'] = False sol = cvxopt.solvers.qp(P,q,A=A,b=b,G=G,h=h) ans = np.transpose(np.array(sol['x'])).dot(B.T) return ans[0] def evaluate_hull(C, B): dist = [] for k in range(1, C.shape[1]+1): max_dist = 0 for i in range(B.shape[1]): c = project_to_hull(B[:, i], C[:, :k]) d = np.linalg.norm(B[:, i] - c) if d > max_dist: max_dist = d dist.append(max_dist) return dist def lasso_obj(beta, X, y, rho): n = y.size res = y - X.dot(beta) return 0.5 * res.dot(res) / n + rho * np.linalg.norm(beta, 1) def find_lasso_boundary(beta, X, y, rho, d, nu, eps=1e-5): # binary search lam_prev = 1.0 obj_prev = lasso_obj(beta + lam_prev * d, X, y, rho) if obj_prev >= nu: lam = lam_prev * 0.5 else: lam = lam_prev * 2.0 while True: obj = lasso_obj(beta + lam * d, X, y, rho) if obj >= nu: lam_prev = lam lam *= 0.5 else: lam_prev = lam lam *= 2.0 if (obj >= nu and obj_prev < nu) or (obj_prev >= nu and obj < nu): break lam_lb = min(lam, lam_prev) lam_ub = max(lam, lam_prev) while True: lam = 0.5 * (lam_lb + lam_ub) obj = lasso_obj(beta + lam * d, X, y, rho) if obj >= nu: lam_ub = lam else: lam_lb = lam if obj <= nu and nu - obj < eps: break return beta + lam * d def sample_boundary(beta, X, y, rho, nu, K=100, eps=1e-5, seed=0): p = X.shape[1] B = [] for k in range(K): np.random.seed(k) d = np.random.randn(p) d /= np.linalg.norm(d) b = find_lasso_boundary(beta, X, y, rho, d, nu, eps=eps) B.append(b) return B ``` ## Synthetic Data - p=2 ``` # synthetic data in p=2 X = np.array([[1, 1], [1, 
1+1/40]]) y = np.ones(2) rho = 1.0 / 2 # LassoHull: # of sampling=30, # of vertecies=4 hull = LassoHull.LassoHull(nu=31/30, eta=-1.0, seed=0) # nu = nu^* + 1/40 hull.set_data(X, y, rho) hull.add_extreme(M=30, verbose=-1) hull.initialize_hull() hull.add_vertex(K=3) # plot B(nu) - fill fig = plt.figure(figsize=(4, 4)) t = np.linspace(-1 / 80, 0, 200) plt.fill_between(t, (-np.sqrt(80 * t + 1) / np.sqrt(10) - 2 * t + 1) / 2, (np.sqrt(80 * t + 1) / np.sqrt(10) - 2 * t + 1) / 2, facecolor=[0.8, 0.8, 0.9]) t = np.linspace(0, (10 - np.sqrt(10))/20, 200) plt.fill_between(t, (-1/np.sqrt(10) - 2 * t + 1) / 2, (1/np.sqrt(10) - 2 * t + 1) / 2, facecolor=[0.8, 0.8, 0.9]) t = np.linspace((10 - np.sqrt(10))/20, (10 + np.sqrt(10))/20, 200) plt.fill_between(t, (3 - 2 * t - np.sqrt(81 - 40 * t - 40 * np.abs(t)) / np.sqrt(10)) / 2, (1/np.sqrt(10) - 2 * t + 1) / 2, facecolor=[0.8, 0.8, 0.9]) # plot B(nu) - edge t = np.linspace(0, (10 + np.sqrt(10))/20, 200) plt.plot(t, (1/np.sqrt(10) - 2 * t + 1) / 2, 'k--') t = np.linspace(0, (10 - np.sqrt(10))/20, 200) plt.plot(t, (-1/np.sqrt(10) - 2 * t + 1) / 2, 'k--') t = np.linspace(-1 / 80, 0, 200) plt.plot(t, (np.sqrt(80 * t + 1) / np.sqrt(10) - 2 * t + 1) / 2, 'k--') t = np.linspace(-1/ 80, 0, 200) plt.plot(t, (-np.sqrt(80 * t + 1) / np.sqrt(10) - 2 * t + 1) / 2, 'k--') t = np.linspace((10 - np.sqrt(10)) / 20, (10 + np.sqrt(10)) / 20, 200) plt.plot(t, (3 - 2 * t - np.sqrt(81 - 40 * t - 40 * np.abs(t)) / np.sqrt(10)) / 2, 'k--') # plot result for c in hull.C_.T: plt.plot(c[0], c[1], 'bs', markersize=16) for b in hull.B_: plt.plot(b[0], b[1], 'o', markersize=10, markeredgecolor='r', markerfacecolor=[1.0, 0.8, 0.8]) plt.plot(hull.C_[0, [0, 1]], hull.C_[1, [0, 1]], 'b-') plt.plot(hull.C_[0, [0, 3]], hull.C_[1, [0, 3]], 'b-') plt.plot(hull.C_[0, [1, 2]], hull.C_[1, [1, 2]], 'b-') plt.plot(hull.C_[0, [2, 3]], hull.C_[1, [2, 3]], 'b-') plt.plot(hull.beta_[0], hull.beta_[1], 'g*', markersize=18, markerfacecolor='w') plt.xlim([-0.05, 0.7]) 
plt.ylim([-0.05, 0.7]) plt.xlabel('beta_1', fontsize=16) plt.ylabel('beta_2', fontsize=16) plt.xticks([0.2*i for i in range(0, 4)], fontsize=16) plt.yticks([0.2*i for i in range(0, 4)], fontsize=16) plt.gca().set_aspect('equal', adjustable='box') plt.show() # LassoHull: Psuedo Ground Truth hullB = LassoHull.LassoHull(nu=31/30, eta=-1.0, seed=0) hullB.set_data(X, y, rho) hullB.add_extreme(M=1000, verbose=-1) dist = evaluate_hull(hull.C_, np.array(hullB.B_).T) # plot Hausdorff distance fig = plt.figure() plt.plot(range(1, len(dist)+1), dist, 'bo-') plt.xticks(range(1, len(dist)+1), fontsize=16) plt.yticks([0, 0.5, 1.0], fontsize=16) plt.gca().set_aspect('equal', adjustable='box') plt.xlabel('K', fontsize=16) plt.ylabel('Hausdorff dist.', fontsize=16) plt.show() ``` ## Synthetic Data - p=3 ``` # synthetic data in p=3 X = np.array([[1, 1, 1], [1, 1+1/40, 1], [1, 1, 1+2/40]]) y = np.ones(3) rho = 1.0 / 3 # LassoHull: # of sampling=50, # of vertecies=6 hull = LassoHull.LassoHull(nu=103/100, eta=-1.0, seed=0) # nu = nu^* + 1/40 hull.set_data(X, y, rho) hull.add_extreme(M=50, verbose=-1) hull.initialize_hull() hull.add_vertex(K=5) # boundary points hullB = LassoHull.LassoHull(nu=103/100, eta=-1.0, seed=0) # nu = nu^* + 1/40 hullB.set_data(X, y, rho) hullB.add_extreme(M=200, verbose=-1) beta = np.mean(np.array(hullB.B_), axis=0) C = sample_boundary(beta, X, y, rho, hull.nu * hull.obj_, K=200) C.extend(hullB.B_) C = np.array(C) Q = ConvexHull(C) # plot result fig = plt.figure(figsize=(6, 6)) ax = fig.add_subplot(111, projection='3d') poly = [C[s, :] for s in Q.simplices] tri = a3.art3d.Poly3DCollection(poly, facecolors=[0.8, 0.8, 0.9], alpha=0.001, linewidths=0) ax.add_collection3d(tri) ax.scatter(hull.C_[0, :], hull.C_[1, :], hull.C_[2, :], marker='s', color='b', depthshade=False, s=208) B = np.array(hull.B_) ax.scatter(B[:, 0], B[:, 1], B[:, 2], marker='o', color=[1.0, 0.8, 0.8], depthshade=False, s=64, edgecolor='r') ax.plot(hull.C_[0, [0, 1]], hull.C_[1, [0, 1]], 
hull.C_[2, [0, 1]], 'b-') ax.plot(hull.C_[0, [0, 2]], hull.C_[1, [0, 2]], hull.C_[2, [0, 2]], 'b-') ax.plot(hull.C_[0, [0, 4]], hull.C_[1, [0, 4]], hull.C_[2, [0, 4]], 'b-') ax.plot(hull.C_[0, [1, 2]], hull.C_[1, [1, 2]], hull.C_[2, [1, 2]], 'b-') ax.plot(hull.C_[0, [1, 3]], hull.C_[1, [1, 3]], hull.C_[2, [1, 3]], 'b-') ax.plot(hull.C_[0, [2, 5]], hull.C_[1, [2, 5]], hull.C_[2, [2, 5]], 'b-') ax.plot(hull.C_[0, [3, 4]], hull.C_[1, [3, 4]], hull.C_[2, [3, 4]], 'b-') ax.plot(hull.C_[0, [3, 5]], hull.C_[1, [3, 5]], hull.C_[2, [3, 5]], 'b-') ax.plot(hull.C_[0, [4, 5]], hull.C_[1, [4, 5]], hull.C_[2, [4, 5]], 'b-') ax.scatter(hull.beta_[0], hull.beta_[1], hull.beta_[2], marker='*', color='w', s=256, depthshade=False, edgecolor='g') ax.set_xlim(-0.05, 0.8) ax.set_ylim(-0.05, 0.8) ax.set_zlim(-0.05, 0.8) ax.set_xlabel('beta_1', fontsize=14) ax.set_ylabel('beta_2', fontsize=14) ax.set_zlabel('beta_3', fontsize=14) ax.set_xticks([i*0.2 for i in range(0, 5)]) ax.set_xticklabels(['%.1f' % (i*0.2,) for i in range(0, 5)], fontsize=14) ax.set_yticks([i*0.2 for i in range(0, 5)]) ax.set_yticklabels(['%.1f' % (i*0.2,) for i in range(0, 5)], fontsize=14) ax.set_zticks([i*0.2 for i in range(0, 5)]) ax.set_zticklabels(['%.1f' % (i*0.2,) for i in range(0, 5)], fontsize=14) ax.view_init(30, 30) plt.gca().set_aspect('equal', adjustable='box') plt.show() # LassoHull: Psuedo Ground Truth hullB = LassoHull.LassoHull(nu=103/100, eta=-1.0, seed=0) hullB.set_data(X, y, rho) hullB.add_extreme(M=1000, verbose=-1) dist = evaluate_hull(hull.C_, np.array(hullB.B_).T) # plot Hausdorff distance fig = plt.figure() plt.plot(range(1, len(dist)+1), dist, 'bo-') plt.xticks(range(1, len(dist)+1), fontsize=16) plt.yticks([0, 0.5, 1.0], fontsize=16) plt.gca().set_aspect('equal', adjustable='box') plt.xlabel('K', fontsize=16) plt.ylabel('Hausdorff dist.', fontsize=16) plt.show() ```
github_jupyter
# Find Clusters of Infected People <span style="color:red"> **URGENT WARNING** We have been receiving reports from health facilities that a new, fast-spreading virus has been discovered in the population. To prepare our response, we need to understand the geospatial distribution of those who have been infected. Find out whether there are identifiable clusters of infected individuals and where they are. </span> Your goal for this notebook will be to estimate the location of dense geographic clusters of infected people using incoming data from day 1 of the simulated epidemic. ## Imports ``` import cudf import cuml import cupy as cp ``` ## Load Data Begin by loading the data you've received about day 1 of the outbreak into a cuDF data frame. The data is located at `'../data/data_pop.csv'`. For this notebook you will only need the `'lat'`, `'long'`, and `'infected'` columns. Either drop the columns after loading, or use the `cudf.read_csv` named argument `usecols` to provide a list of only the columns you need. ## Make Data Frame of the Infected Make a new cuDF data frame `infected_df` that contains only the infected members of the population. ## Make Grid Coordinates for Infected Locations Provided for you in the next cell (which you can expand by clicking on the "..." and contract again after executing by clicking on the blue left border of the cell) is a lat/long to OSGB36 grid coordinates converter. Use this converter by passing it `lat` and `long` values to create grid coordinate values stored in `northing` and `easting` columns of the `infected_df` you created in the last step. **Optional**: For more information on grid coordinates read [this blog post](https://gisgeography.com/latitude-longitude-coordinates/). 
```
# https://www.ordnancesurvey.co.uk/docs/support/guide-coordinate-systems-great-britain.pdf
def latlong2osgbgrid_cupy(lat, long, input_degrees=True):
    '''
    Converts latitude and longitude (ellipsoidal) coordinates into northing and easting (grid) coordinates, using a Transverse Mercator projection.

    Inputs:
        lat: latitude coordinate (N)
        long: longitude coordinate (E)
        input_degrees: if True (default), interprets the coordinates as degrees; otherwise, interprets coordinates as radians

    Output:
        (northing, easting)
    '''

    # all trigonometry below works in radians
    if input_degrees:
        lat = lat * cp.pi/180
        long = long * cp.pi/180

    # ellipsoid semi-major / semi-minor axes in metres
    # (constants match the Airy 1830 ellipsoid used by OSGB36 — see the OS guide linked above)
    a = 6377563.396
    b = 6356256.909
    e2 = (a**2 - b**2) / a**2  # eccentricity squared of the ellipsoid

    N0 = -100000               # northing of true origin
    E0 = 400000                # easting of true origin
    F0 = .9996012717           # scale factor on central meridian
    phi0 = 49 * cp.pi / 180    # latitude of true origin
    lambda0 = -2 * cp.pi / 180 # longitude of true origin and central meridian

    # precompute trig terms reused throughout the series expansion
    sinlat = cp.sin(lat)
    coslat = cp.cos(lat)
    tanlat = cp.tan(lat)

    # offsets from the true origin
    latdiff = lat-phi0
    longdiff = long-lambda0

    n = (a-b) / (a+b)  # third flattening of the ellipsoid
    nu = a * F0 * (1 - e2 * sinlat ** 2) ** -.5            # transverse radius of curvature
    rho = a * F0 * (1 - e2) * (1 - e2 * sinlat ** 2) ** -1.5  # meridional radius of curvature
    eta2 = nu / rho - 1
    # M: developed meridional arc length from phi0 to lat (series expansion)
    M = b * F0 * ((1 + n + 5/4 * (n**2 + n**3)) * latdiff -
                  (3*(n+n**2) + 21/8 * n**3) * cp.sin(latdiff) * cp.cos(lat+phi0) +
                  15/8 * (n**2 + n**3) * cp.sin(2*(latdiff)) * cp.cos(2*(lat+phi0)) -
                  35/24 * n**3 * cp.sin(3*(latdiff)) * cp.cos(3*(lat+phi0)))
    # I..VI: intermediate terms of the Transverse Mercator projection series,
    # named after the convention used in the OS guide referenced above
    I = M + N0
    II = nu/2 * sinlat * coslat
    III = nu/24 * sinlat * coslat ** 3 * (5 - tanlat ** 2 + 9 * eta2)
    IIIA = nu/720 * sinlat * coslat ** 5 * (61-58 * tanlat**2 + tanlat**4)
    IV = nu * coslat
    V = nu / 6 * coslat**3 * (nu/rho - cp.tan(lat)**2)
    VI = nu / 120 * coslat ** 5 * (5 - 18 * tanlat**2 + tanlat**4 + 14 * eta2 - 58 * tanlat**2 * eta2)

    # grid coordinates as polynomials in the longitude offset from the central meridian
    northing = I + II * longdiff**2 + III * longdiff**4 + IIIA * longdiff**6
    easting = E0 + IV * longdiff + V * longdiff**3 + VI * longdiff**5

    return(northing, easting)
```

## Find Clusters of Infected People

Use DBSCAN to find clusters of at least 25 infected people
where no member is more than 2000m from at least one other cluster member. Create a new column in `infected_df` called `cluster_label` which contains the cluster to which each infected person belongs. ## Find the Center of Each Cluster Use grouping to find the mean `'easting'` and `'northing'` values for each cluster identified above.
github_jupyter
# Hosting models on Grid Grid offers both: Machine Learning as a Service and Encrypted Machine Learning as a service. In this series of tutorials we show how you can serve and query models on Grid. This option consists of: **Owner** 1. Owner has a model ```python model = Plan() model.build(data) ``` 2. Owner shares the model and sends the model to alice in an encrypted fashion ```python plan.fix_precision().share(bob, charlie, crypto_provider=dan).send(alice) ``` **User** 1. User fetch the plan (this means they have the state locally as pointers to alice) but they can fetch a plan only once, which means only one user can run inference. This limitation could be turned into a feature if we consider that in the future we may ask for authentication for a user to have access to a model. So the owner could build the model, share it and send to a remote worker (here alice). Then the owner could tell the user "Hey here's your token, now you have access to the model, you now have access to a encrypted version of my model, but hey, don't lose this model copy, okay? If you lose it you'll have to ask for a new token." ``` # Fetch plan fetched_plan = plan.owner.fetch_plan(plan.id, alice) ``` 2. User shares their data with the same workers ``` x = th.tensor([-1.0]) x_sh = data.fix_precision().share(bob, charlie, crypto_provider=dan) ``` 3. User can run inference using this model copy ``` decrypted = fetched_plan(x_sh).get().float_prec() ``` A few notes: - No one knows the model except the model owner (yay!!!) - The model is secure because we only have access to pointers not the actual weights - The user has access to the readable_plan which means the user can figure out the model architecture but not the weight values ## 3.2 Host and query an encrypted model ### Fetch can be done only once In the previous tutorial we served a CNN for classifying images with different 2 types of skin deseases: benign keratosis and melanoma (type of skin cancer). 
In this tutorial we show how to serve this model on a **encrypted way** on Grid. ### Imports and model specifications ``` # Import dependencies import torch as th import syft as sy import torch.nn as nn import torch.nn.functional as F import grid as gr import helper # Hook hook = sy.TorchHook(th) me = hook.local_worker me.is_client_worker = False # Connect to nodes alice = gr.WebsocketGridClient(hook, "http://localhost:3001", id="Alice") alice.connect() bob = gr.WebsocketGridClient(hook, "http://localhost:3000", id="Bob") charlie = gr.WebsocketGridClient(hook, "http://localhost:3002", id="James") dan = gr.WebsocketGridClient(hook, "http://localhost:3003", id="Dan") bob.connect() charlie.connect() dan.connect() # Connect nodes to each other gr.connect_all_nodes([bob, alice, charlie, dan]) ``` ### Load dataset ``` df = helper.read_skin_cancer_dataset() train_df, valid_df, test_df = helper.split_data(df) # These values are from Part 1. input_size = 32 train_mean, train_std = (th.tensor([0.6979, 0.5445, 0.5735]), th.tensor([0.0959, 0.1187, 0.1365])) # Create a test dataloader test_set = helper.Dataset(test_df, transform=helper.transform(input_size, train_mean, train_std)) test_generator = th.utils.data.DataLoader(test_set, batch_size=1, shuffle=True) # Get a data sample and a target data, target = next(iter(test_generator)) ``` ### Making a model ready to be served and encrypted In order to serve the model it needs to be serializable. A Plan is intended to store a sequence of torch operations, just like a function, but it allows to send this sequence of operations to remote workers and to keep a reference to it. You can learn more about plans in [Syft's tutorials](https://github.com/OpenMined/PySyft/blob/dev/examples/tutorials/Part%2008%20-%20Introduction%20to%20Plans.ipynb). ### Define Model Let's load the model we just trained. 
``` model = helper.make_model(is_plan=True) # model.load_state_dict(th.load("binary-skin-cancer-detection-model")) model.build(data) helper.test(model, test_generator) ``` ## Serve model ``` model.fix_precision().share(bob, charlie, crypto_provider=dan).send(alice) from IPython.display import display_html def restart_kernel() : display_html("<script>Jupyter.notebook.kernel.restart()</script>",raw=True) restart_kernel() # Import dependencies import torch as th import syft as sy import torch.nn as nn import torch.nn.functional as F import grid as gr import helper # Hook hook = sy.TorchHook(th) me = hook.local_worker me.is_client_worker = False # Connect to nodes alice = gr.WebsocketGridClient(hook, "http://localhost:3001", id="Alice") alice.connect() bob = gr.WebsocketGridClient(hook, "http://localhost:3000", id="Bob") charlie = gr.WebsocketGridClient(hook, "http://localhost:3002", id="James") dan = gr.WebsocketGridClient(hook, "http://localhost:3003", id="Dan") bob.connect() charlie.connect() dan.connect() # Connect nodes to each other gr.connect_all_nodes([bob, alice, charlie, dan]) ``` ### Load dataset ``` df = helper.read_skin_cancer_dataset() train_df, valid_df, test_df = helper.split_data(df) # These values are from Part 1. input_size = 32 train_mean, train_std = (th.tensor([0.6979, 0.5445, 0.5735]), th.tensor([0.0959, 0.1187, 0.1365])) # Create a test dataloader test_set = helper.Dataset(test_df, transform=helper.transform(input_size, train_mean, train_std)) test_generator = th.utils.data.DataLoader(test_set, batch_size=1, shuffle=True) # Get a data sample and a target data, target = next(iter(test_generator)) x_sh = data.fix_precision().share(bob, charlie, crypto_provider=dan) ``` ### Get a copy of the private model ``` # Fetch plan fetched_plan = me.fetch_plan("convnet", alice, copy=True) ``` ### Run encrypted inference ``` %%time print(fetched_plan(x_sh).get().float_prec()) ``` # Congratulations!!! - Time to Join the Community! 
Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways! ## Star PySyft on GitHub The easiest way to help our community is just by starring the GitHub repos! This helps raise awareness of the cool tools we're building. [Star PySyft](https://github.com/OpenMined/PySyft) ## Join our Slack! The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at http://slack.openmined.org ## Join a Code Project! The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue". [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject) [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) ## Donate If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! [OpenMined's Open Collective Page](https://opencollective.com/openmined)
github_jupyter
``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np ``` # Supervised Learning Part 1 -- Regression Analysis In regression we are trying to predict a continuous output variable -- in contrast to the nominal variables we will be predicting in the classification examples later. Let's start with a simple toy example with one feature dimension (explanatory variable) and one target variable. We will create a dataset out of a sinus curve with some noise: ``` x = np.linspace(-3, 3, 100) print(x) rng = np.random.RandomState(42) y = np.sin(4 * x) + x + rng.uniform(size=len(x)) plt.plot(x, y, 'o'); ``` ## Linear Regression The first model that we will introduce is the so-called simple linear regression. Here, we want to fit a line to the data. One of the simplest models is a linear one, that simply tries to predict the data as lying on a line. One way to find such a line is `LinearRegression` (also known as [*Ordinary Least Squares (OLS)*] (https://en.wikipedia.org/wiki/Ordinary_least_squares) regression). The scikit-learn API requires us to provide the target variable (`y`) as a 1-dimensional array; scikit-learn's API expects the samples (`X`) in form a 2-dimensional array -- even though it may only consist of 1 feature. Thus, let us convert the 1-dimensional `x` NumPy array into an `X` array with 2 axes: ``` print('Before: ', x.shape) X = x[:, np.newaxis] print('After: ', X.shape) ``` Regression is a supervised task, and since we are interested in its performance on unseen data, we split our data into two parts: 1. a training set that the learning algorithm uses to fit the model 2. a test set to evaluate the generalization performance of the model The ``train_test_split`` function from the ``model_selection`` module does that for us -- we will use it to split a dataset into 75% training data and 25% test data. 
<img width="50%" src='https://github.com/fordanic/cmiv-ai-course/blob/master/notebooks/figures/train_test_split_matrix.png?raw=1'/> We start by splitting our dataset into a training (75%) and a test set (25%): ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) ``` ## The scikit-learn estimator API and Linear Regression <img width="50%" src='https://github.com/fordanic/cmiv-ai-course/blob/master/notebooks/figures/supervised_workflow.png?raw=1'/> Every algorithm is exposed in scikit-learn via an ''Estimator'' object. (All models in scikit-learn have a very consistent interface). For instance, we first import the linear regression class. ``` from sklearn.linear_model import LinearRegression ``` Next, we use the learning algorithm implemented in `LinearRegression` to **fit a regression model to the training data**: ``` regressor = LinearRegression() regressor.fit(X_train, y_train) ``` (Some estimator methods such as `fit` return `self` by default. Thus, after executing the code snippet above, you will see the default parameters of this particular instance of `LinearRegression`. Another way of retrieving the estimator's initialization parameters is to execute `regressor.get_params()`, which returns a parameter dictionary.) After fitting to the training data, we parameterized a linear regression model with the following values.
``` print('Weight coefficients: ', regressor.coef_) print('y-axis intercept: ', regressor.intercept_) ``` Since our regression model is a linear one, the relationship between the target variable (y) and the feature variable (x) is defined as: $$y = weight \times x + \text{intercept}$$ Plugging in the min and max values into this equation, we can plot the regression fit to our training data: ``` min_pt = X.min() * regressor.coef_[0] + regressor.intercept_ max_pt = X.max() * regressor.coef_[0] + regressor.intercept_ plt.plot([X.min(), X.max()], [min_pt, max_pt]) plt.plot(X_train, y_train, 'o'); ``` Similar to the estimators for classification in the previous notebook, we use the `predict` method to predict the target variable. And we expect these predicted values to fall onto the line that we plotted previously: ``` y_pred_train = regressor.predict(X_train) plt.plot(X_train, y_train, 'o', label="data") plt.plot(X_train, y_pred_train, 'o', label="prediction") plt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit') plt.legend(loc='best') ``` As we can see in the plot above, the line is able to capture the general slope of the data, but not many details. Next, let's try the test set: ``` y_pred_test = regressor.predict(X_test) plt.plot(X_test, y_test, 'o', label="data") plt.plot(X_test, y_pred_test, 'o', label="prediction") plt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit') plt.legend(loc='best'); ``` There is also a convenience function, ``score``, that all scikit-learn classifiers have to compute how good the model is: For regression tasks, this is the **R<sup>2</sup>** score. $$R^2= 1- \frac{\sum_{i}(f_i-y_i)^2}{\sum_{i}(y_i-\mu)^2}$$ ``` regressor.score(X_test, y_test) ``` ___ ## Exercise We will now look at a dataset with more than one variable. The ideas from the simple example above can be reused (we do not have to add an additional axis to the data). Create a linear regressor that fits the ``diabetes`` data.
The methods that plot the data above cannot be used directly, can you adapt them in some way? ``` from sklearn.datasets import load_diabetes diabetes_data = load_diabetes() X, y = diabetes_data.data, diabetes_data.target print("The data is described at https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html. The short description is:") print("Ten baseline variables, age, sex, body mass index, average blood pressure, and six blood serum measurements were obtained for each of n = 442 diabetes patients, as well as the response of interest, a quantitative measure of disease progression one year after baseline.") # We need to: # Split the data into training and test sets # Create a LinearRegression object # Fit the regressor object to the training data # Evaluate the model on the test data # ... from sklearn.decomposition import PCA pca = PCA(n_components=1).fit(X) X_decomp = pca.transform(X_test) regressor.fit(X_train, y_train) plt.plot(X_decomp, y_test, 'o') plt.plot(X_decomp, regressor.predict(X_test), 'x') X_noise = np.random.randn(*X_train.shape) regressor.fit(X_noise, y_train) plt.plot(X_decomp, y_test, 'o') plt.plot(X_decomp, regressor.predict(X_test), 'x') ``` ___ ## KNeighborsRegression This is a simple regression method that: given a new, unknown observation, look up in your reference database which ones have the closest features and take the output of the nearest point, or we could average several nearest points. This method is less popular for regression than for classification, but still a good baseline. 
``` # First recreate our synthetic data x = np.linspace(-3, 3, 100) rng = np.random.RandomState(42) y = np.sin(4 * x) + x + rng.uniform(size=len(x)) X = x[:, np.newaxis] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) from sklearn.neighbors import KNeighborsRegressor kneighbor_regression = KNeighborsRegressor(n_neighbors=1) kneighbor_regression.fit(X_train, y_train) ``` Again, let us look at the behavior on training and test set: ``` y_pred_train = kneighbor_regression.predict(X_train) plt.plot(X_train, y_train, 'o', label="data", markersize=10) plt.plot(X_train, y_pred_train, 's', label="prediction", markersize=4) plt.legend(loc='best'); ``` On the training set, we do a perfect job: each point is its own nearest neighbor! ``` y_pred_test = kneighbor_regression.predict(X_test) plt.plot(X_test, y_test, 'o', label="data", markersize=8) plt.plot(X_test, y_pred_test, 's', label="prediction", markersize=4) plt.legend(loc='best'); ``` On the test set, we also do a better job of capturing the variation, but our estimates look much messier than before. Let us look at the R<sup>2</sup> score: ``` kneighbor_regression.score(X_test, y_test) ``` Much better than before! Here, the linear model was not a good fit for our problem; it was lacking in complexity and thus under-fit our data. As with linear regression, use the diabetes dataset and create a KNN-regressor. ``` from sklearn.datasets import load_diabetes diabetes_data = load_diabetes() X, y = diabetes_data.data, diabetes_data.target print("The data is described at https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html. 
The short description is:") print('"Ten baseline variables, age, sex, body mass index, average blood pressure, and six blood serum measurements were obtained for each of n = 442 diabetes patients, as well as the response of interest, a quantitative measure of disease progression one year after baseline."') # We need to: # Split the data into training and test sets # Create a KNeighborsRegressor object (how many neighbours do you want to use?) # Fit the regressor object to the training data # Evaluate the model on the test data # ... from sklearn.decomposition import PCA pca = PCA(n_components=1).fit(X) X_decomp = pca.transform(X_test) kneighbor_regression.fit(X_train, y_train) plt.plot(X_decomp, y_test, 'o') plt.plot(X_decomp, kneighbor_regression.predict(X_test), 'x') ``` ___ ## Exercise Create a KNN-regressor for the diabetes data. ___ ___ ## Exercise Compare the KNeighborsRegressor and LinearRegression on the boston housing dataset. You can load the dataset using ``sklearn.datasets.load_boston``. You can learn about the dataset by reading the ``DESCR`` attribute. ``` # %load solutions/knn_vs_linreg.py ``` On Google Colab, visit [knn_vs_linreg.py](https://github.com/fordanic/cmiv-ai-course/blob/master/notebooks/solutions/knn_vs_linreg.py) and manually copy the content of the solution and paste to the cell above. ___
github_jupyter
# Predicting Immunotherapy Response based on RNA-Seq ### Importing Data and gene pathways TCGA Data ``` import pandas as pd import numpy as np tpm = pd.read_csv("data/tcga_sample/expression.tsv", sep="\t") survival = pd.read_csv("data/tcga_sample/survival.tsv", sep="\t", skiprows=1, header=None) meta = pd.read_csv("data/tcga_sample/metadata.tsv", sep="\t", skiprows=1, header=None) cytokines = pd.read_csv("data/genes.cytokine_immune.txt", skiprows=2, header=None) ``` ### Data Processing Get the TPM values for cytokines pathway ``` # Only use cytokine expression tpm = tpm.reindex(cytokines.iloc[:,0].unique(), axis='columns') tpm = tpm.dropna(axis=1) # perform quantile normalization # https://stackoverflow.com/questions/37935920/quantile-normalization-on-pandas-dataframe tpm /= np.max(np.abs(tpm),axis=0) # scale between [0,1] rank_mean = tpm.stack().groupby(tpm.rank(method='first').stack().astype(int)).mean() tpm = tpm.rank(method='min').stack().astype(int).map(rank_mean).unstack() # convert pandas df to np array tpm = tpm.values survival = survival.iloc[:,1:3].values ``` Split data into training and testing sets ``` # split the data into a training set and a validation set VALIDATION_SPLIT = 0.8 # indices = np.arange(tpm.shape[0]) # np.random.shuffle(indices) # # tpm = tpm[indices] # labels = surv_time[indices] num_validation_samples = int(VALIDATION_SPLIT * tpm.shape[0]) x_train = tpm[:num_validation_samples] y_train = survival[:num_validation_samples] x_val = tpm[num_validation_samples:] y_val = survival[num_validation_samples:] ``` ### Survival Neural Network Defining the loss function ``` import tensorflow as tf import keras.backend as K def negative_log_partial_likelihood_loss(regularization): #Wrapper function for the negative logg partial likelihood loss function def loss(y_true, risk): return negative_log_partial_likelihood(y_true, risk, regularization) return loss def negative_log_partial_likelihood(censor, risk, regularization): """Return the negative 
log-partial likelihood of the prediction y_true contains the survival time risk is the risk output from the neural network censor is the vector of inputs that are censored regularization is the regularization constant (not used currently) Uses the Keras backend to perform calculations Sorts the surv_time by sorted reverse time """ # calculate negative log likelihood from estimated risk K.print_tensor(censor) K.print_tensor(risk) hazard_ratio = K.exp(risk) log_risk = K.log(tf.cumsum(hazard_ratio)) # cumsum on sorted surv time accounts for concordance uncensored_likelihood = risk - log_risk censored_likelihood = uncensored_likelihood * censor num_observed_events = K.sum(censor) neg_likelihood = - K.sum(censored_likelihood) / tf.cast(num_observed_events, tf.float32) return neg_likelihood # np.random.seed(123) # negative_log_partial_likelihood(y_train, np.random.rand(y_train.shape[0], 2), 0).eval(session=K.get_session()) def batch_iter(data, labels, batch_size, shuffle=True, isValidationSet=False): num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1 # Sorts the batches by survival time def data_generator(): data_size = len(data) while True: # Sample from the dataset for each epoch if shuffle: shuffle_indices = np.random.permutation(np.arange(data_size)) shuffled_data = data[shuffle_indices] shuffled_labels = labels[shuffle_indices] else: shuffled_data = data shuffled_labels = labels for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, data_size) X, y = shuffled_data[start_index: end_index], shuffled_labels[start_index: end_index] # Sort X and y by survival time in each batch idx = np.argsort(abs(y[:,0]))[::-1] X = X[idx, :] y = y[idx, 1].reshape(-1,1) # sort by survival time and take censored data # reshape for matmul y = y.reshape(-1,1) #reshape to [n, 1] for matmul yield X, y return num_batches_per_epoch, data_generator() train_steps, train_batches = batch_iter(x_train, y_train, 5) 
next(train_batches) from keras.models import Sequential from keras.layers import Dense, Dropout, Activation from keras import Sequential from keras.optimizers import Adam BATCH_SIZE = 30 model = Sequential() model.add(Dense(128, input_dim=x_train.shape[1], name="input")) model.add(Dense(64, activation='relu', name="dense_1")) model.add(Dropout(0.25, name="dropout_1")) model.add(Dense(64, activation='relu', name="dense_2")) model.add(Dense(1, activation='linear', name="output")) opt = Adam(lr=0.001) model_loss = negative_log_partial_likelihood_loss(0) model.compile(optimizer=opt, loss=model_loss, metrics=['accuracy']) # Accuracy is meaningless in this case, only look at loss train_steps, train_batches = batch_iter(x_train, y_train, BATCH_SIZE) valid_steps, valid_batches = batch_iter(x_val, y_val, BATCH_SIZE) history = model.fit_generator(train_batches, train_steps, epochs=20, validation_data=valid_batches, validation_steps=valid_steps) predictions = model.predict(x_val) predictions from lifelines.utils import concordance_index predictions_time = np.exp(predictions) concordance_index(y_val[:,0], predictions_time, y_val[:,1]) predictions_time y_val[:,0] ```
github_jupyter
Eugene Fama won the Nobel Prize for the Efficient Market Hypothesis research he developed with Kenneth French, a consequence of the theory being 'it is impossible to "beat the market" consistently on a risk-adjusted basis since market prices should only react to new information.' There is an ongoing debate between traditional financial economics which uses risk theories to explain asset pricing and the newer behavioral finance field that uses human behavior to provide the explanations. Are premiums risk-based or behavioral-based? # Value Investing It has long been understood that cheap stocks have a tendency to outperform expensive stocks in the stock market. While this is not true every single year, over almost every 3 year cycle in the stock market cheap (or value) stocks outperform. This phenomenon has created the cult of 'Value Investing' born of the writings of Benjamin Graham and the phenomenal success of its arch proponent Warren Buffett. Benjamin Graham once said that 'in the short run the market is a voting machine but in the long run it's a weighing machine' snappily illustrating one of the critical ideas for stock market success - that the market over-reacts but eventually corrects itself. But to pick up the extra profit that can be made by exploiting this corrective mechanism, investors have long debated what is meant by 'cheap'. Generalising grossly, most valuation is done by comparing a company's share price against either what it can earn or what it owns. Price/Earnings ratios, Price/Cashflow ratios, Dividend Yield, Earnings Yield, EV/EBITDA ratios etc all aim to judge price against what a company can earn, while Price/Book, Price/Tangible Book, Price/Net current assets, Price/Cash and so on all compare price against what it owns. Consistent with the value investing philosophy, we use historic ratios for each of these metrics, rather than factoring in analyst forecasts. Investors are a hugely sectarian bunch and defend their preferred valuation metrics from all comers.
But frankly, over the very long term there isn't that much difference in which ratio one prefers to use. The returns to ALL value ratios are highly correlated over the long run and have very similar return profiles. What one does find though is that during different market cycles, different ratios enjoy their time in the sun. A brief glance at recent history shows that this is so: * In the late nineties the Price to Sales ratio was most effective as dotcom companies failed to generate any earnings but flew to the moon. * In the recoveries from most bear markets (2002+, 2009+ etc) the biggest bargains judged by asset based measures (P/B, P/C) often massively outperform. Between 2002-2007, EV/EBITDA based metrics were all the rage as the credit bubble grew and private equity sought to buy companies outright. * In the current yield deprived times we live in, the dividend yield has been the most successful value metric as investors have thirsted for income. All this begs the question as to how can one build a perennial strategy based on a single favoured ratio? If you stick to your use of e.g. the P/B ratio at all times you could find that you massively underperform the market for a 3 year period. Nobody enjoys that kind of underperformance and frankly the majority of investors end up throwing in the towel on their strategy right at the wrong time. Is there a solution? Value Ratios Winners: While "in most cases we were able to observe an average outperformance of value stocks" they surprisingly found that the growth relative ratios (PEG and lesser known VEG) carried less volatility. The four winning value ratios (with annualised added value of between 7% and 10.5%) proved to be the Price to Cashflow relative to the sector, Price to Sales relative to sector, PEG (12 months forward PE vs Expected Growth), VEG ( EV/EBIT vs Expected Growth). 
Composite value scoring systems have been used successfully by many investors since Ben Graham originally designed his 'multiplier' that blended the P/E ratio and P/B ratio. This work has been extended by data driven investors for decades, and composite value scores & indices have been published by institutional investment platforms like Starmine (Thomson Reuters), Morningstar, S&P and Societe Generale. One of the most classic and inspiring studies is by James O'Shaughnessy in the 4th edition of What Works on Wall St in which he showed that composite value factors based on an average ranking of 6 ratios - P/S, P/E, P/B, P/CF, EBITDA/EV, and Shareholder Yield (dividends+buybacks) - dramatically beat the market. These ideas can be seen backtested and further verified in the excellent "Quantitative Value" book. Given that during different market environments different value ratios perform better it's perhaps unsurprising that if you buy the cheapest stocks in the market based on this composite measure you can achieve better overall returns with less risk over the long run than using the ratios individually. Indeed between 1964 and 2009 O'Shaughnessy showed that using Price/Sales alone - the best 10% of stocks by Price to Sales rebalanced annually returned 14.49% with a standard deviation (volatility) of 20.68%. But by using the composite value factor instead the return was improved to 17.3% and the standard deviation reduced to 17.1%. O'Shaughnessy suggested that these returns could be even further improved by filtering the universe further for higher quality stock. the composite value ratio beats all the other single value ratios bar one - the 'earnings yield' or EBIT/Enterprise Value. For those that don't know this ratio is a core part of Joel Greenblatt's "Magic Formula" and very popular amongst value investors. value factor has been struggling in the past few years. It's a constant trial and error. What worked once loses its edge (or premium) eventually i.e. 
Fama French factors of size and value. ARBITRAGED AWAY Value vs Growth investing: There is no debate about whether value stocks have outperformed growth stocks, on average. Historically, cheaper stocks have earned higher returns than expensive stocks. **Talk about higher potential for growth for value, but recent value trap**: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2494412 It should though be noted that 'cheap' stocks are often distressed, junk or low quality stocks and can often be highly volatile as a result - so one should be careful in the very highest ValueRank companies. Investing in these kind of stocks is known as 'bargain' investing and can require a lot of finesse. Instead of debating on the best style of investing, a safer way to invest in value stocks is to find the best quality value stocks or value stocks whose share prices are turning around - filtering the wheat from the chaff. Twinning the Value Rank with either the Quality Rank or the Momentum Rank to help avoid value traps. # Growth Investing Growth investors share a different view of the market compared to value investors. # Momentum Investing Momentum Ratio Winners: The winning ratios from a momentum perspective proved to be weighted towards earnings surprises - the balance of upward vs downward earnings revisions, standardised unexpected earnings, unexpected returns around earnings announcements, six month absolute price strength. Momentum and growth are very different things. Momentum is to do with the correlation between past prices and future prices. Growth investing is trying to identify companies whose future potential earnings growth has not been fully appreciated by the market. One is concerned with prices, the other earnings. Growth companies can have momentum and vice versa but the two are certainly not synonymous. Momentum is present in many asset classes such as commodities not just equities whereas growth investing obviously applies only to equities. 
Despite being a fairly naive strategy momentum has shown a pretty robust long term cumulative outperformance, whereas the evidence for growth strategies is less clear - probably because it is almost impossible to build a consistent growth strategy. Simply picking high P/E etc. companies that the market expects to grow quickly is the opposite of a value strategy and will earn a negative premium on average. Relying on brokers' forecasts is often shown to be little better than rolling a dice, and following fund managers relies on a very long record to show that it is any better than chance. Buffett's strategy is more akin to a quality and value strategy. Identifying companies that have good long term business fundamentals and economic moats so should earn high long term returns on capital and then buying those businesses at a discount to a sober assessment of their true worth. # Quality Investing This process is often referred to as a QARP (quality at a reasonable price) or GARP (growth at a reasonable price) investing process. # Income Investing https://seekingalpha.com/article/4313691-28-dividend-kings-ranked-quality-score Chowder Rule https://dividendearner.com/using-chowder-rule/#:~:text=The%20Chowder%20Rule%20simply%20tries,dividend%20growth%20and%20stock%20appreciation. # Factor Investing What works in investing is a completely open secret - cheap stocks tend to outperform expensive stocks, and stocks which surprise the market's expectations tend to continue to surprise. The problem is that both these general strategies (known as value and momentum) have a tendency to be extremely volatile when used alone. Value strategies can under perform the indices for significant periods of time while momentum strategies can suffer sharp reversals. It is well understood though, both by practitioners and academics that blending the two approaches has the twin impact of lowering the downside risk, while preserving the upside potential.
Value + Momentum has been found to be a market beating strategy not only in the stock market but across bonds, currencies and every other asset class. A recent famed research paper titled "Buffett's Alpha" showed that almost all of Warren Buffett's outperformance over the years can be explained by the 'general tendency of high quality, safe, cheap stocks to outperform'. Meanwhile the late, esteemed finance professor Robert Haugen proved that cheap, quality, growing stocks with momentum have a tendency to dramatically outperform the market. What drives stock returns? A factor can be thought of as any characteristic relating a group of securities that is important in explaining their return and risk. A large body of academic research highlights that long term equity portfolio performance can be explained by factors. Certain factors have historically earned a long-term risk premium and represent exposure to systematic sources of risk. Factor investing is the investment process that aims to harvest these risk premia through exposure to factors. ## Standing on the Shoulders of Giants Evaluating risk is not only about evaluating the amount of potential loss. It allows us to set reasonable expectations for returns and make well-informed decisions about potential investments. Quantifying the sources of risk associated with a portfolio can reveal to what extent the portfolio is actually accomplishing a stated investment goal. If an investment strategy is described as targeting market and sector neutrality, for example, the underlying portfolio should not be achieving significant portions of its returns from a persistent long exposure to the technology sector. While this strategy may show profit over a given timeframe, understanding that those profits are earned on the basis of unintended bets on a single sector may lead the investor to make a different decision about whether and how much capital to allocate. 
Quantifying risk exposures allows investors and managers to create risk management strategies and refine their portfolio. Developing a risk model allows for a clear distinction between common risk and specific risk. **Common risk** is defined here as risk attributable to common factors which drive returns within the equity stock market. These factors can be composed of either fundamental or statistical information about the underlying investment assets that make up the market. * Fundamental factors are often observable fundamental ratios reported by companies that issue stock, such as the ratio of book value to share price, or earnings per share. These factors are typically derived from financial and macroeconomic sources of data. * Statistical factors use mathematical models to explain the correlations between asset returns time-series without consideration of company- specific fundamental data (Axioma, Inc. 2011). Some commonly-cited risk factors are the influence of an overall market index, as in the Capital Asset Pricing Model (CAPM) (Sharpe 1964), risk attributable to investing within individual sectors, which give an idea of the space a company works within, as in the BARRA risk model (BARRA, Inc. 1998), or style factors, which mimic investment styles such as investing in “small cap” companies or “high growth” companies, as in the Fama-French 3-factor model . **Specific risk** is defined here as risk that is unexplainable by the common risk factors included in a risk model. Typically, this is represented as a residual component left over after accounting for common risk (Axioma, Inc. 2011). When we consider risk management in the context of quantitative trading, our understanding of risk is used in large part to clarify our definition of "alpha". This residual after accounting for the common factor risk of a portfolio can be thought of as a proxy for or estimate of the alpha of the portfolio. 
### Capital Asset Pricing Model Your asset or portfolio is exposed to the overall market, which inherently involves a risk, the **systematic risk**. The more it is exposed to it, the more it depends on its fluctuations, so the riskier it is, and the more you should be compensated for taking that additional risk. The Capital Asset Pricing Model (CAPM in short) attempts to explain the expected return of a security as a function of one risk factor, the market risk premium. It is the influence of the market on the security. It is computed as the excess market returns, in other words the additional return an investor expects from holding a risky market portfolio instead of risk-free assets. Investors expect to be compensated for risk and the time value of money. * The risk-free rate in the CAPM formula accounts for the time value of money. * The other components of the CAPM formula account for the investor taking on additional risk. $$ (R_i - R_f)_t = \alpha_i + \beta_i \, (R_m - R_f)_t + \epsilon_t $$ where * $R_i$ is the expected return of the security * $R_f$ is the risk-free rate of return, or that of a hypothetical investment with no risk of financial loss (i.e. monthly Treasury Bill (t-bill) rate) * $R_m$ is the expected market return * $R_m - R_f$ is the market risk premium (excess market returns) * $R_i - R_f$ is the monthly return to the asset of concern in excess of the monthly t-bill rate. * $\beta_i$ is the Beta of the investment, the measure of systematic risk. It represents the influence of the market on the excess return of the investment, i.e. the volatility of the investment as compared to the overall market. Once the market risk premium and risk free rate are defined, the $\beta$ coefficient can be determined by linear regression. The intercept in this model is referred to as "Jensen's alpha". Intuitively, the more a security is exposed to systematic risk i.e.
the overall market (captured by the Beta), the more return one should expect from that security. Another way to think about is, since the risk-free rate can be obtained with no risk, any other investment having some risk will have to have a higher rate of return in order to induce any investors to hold it. Your return should therefore be proportional to your exposure to that risk factor, which is captured by the Beta. ``` ``` The CAPM is a simple model and is most commonly used in the finance industry. It is used in the calculation of the Weighted Average Cost of Capital/ Cost of equity. But this model is based on a few slightly unreasonable assumptions such as ‘the riskier the investment, the higher the return’ which might not be necessarily true in all the scenarios, an assumption that historical data accurately predicts the future performance of the asset/stocks, etc. Furthermore, it uses only one variable to describe the returns of a portfolio or stock with the returns of the market as a whole. What if there are many factors and not just one which determines the rate of return? Because these patterns in average returns apparently are not explained by the CAPM, they are called anomalies. ### Arbitrage Pricing Theory The arbitrage pricing theory was developed by the economist Stephen Ross in 1976, as an alternative to the capital asset pricing model (CAPM). Unlike the CAPM, which assume markets are perfectly efficient, APT assumes markets sometimes misprice securities, before the market eventually corrects and securities move back to fair value. Using APT, arbitrageurs hope to take advantage of any deviations from fair market value. However, this is not a risk-free operation in the classic sense of arbitrage, because investors are assuming that the model is correct and making directional trades—rather than locking in risk-free profits. It allows us to measure the influence of more than one factor when considering the forces that drive returns. 
APT is therefore a multi-factor asset pricing model based on the idea that an asset's returns can be predicted using the linear relationship between the asset’s expected return and a number of macroeconomic variables that capture systematic risk. It is a useful tool for analyzing portfolios from a value investing perspective, in order to identify securities that may be temporarily mispriced. APT expresses the returns of individual assets using a multiple linear regression, a linear factor model, like so: $$ R_i = {\alpha}_i + {\beta}_{i,0}F_0 + {\beta}_{i,1}F_1 + ... + {\beta}_{i,m}F_m + {\epsilon}_i$$ ### Barra Risk Factor Analysis Another category of risk factors are those attributable to investing within individual sectors, which give an idea of the space a company works within. The Barra Risk Factor Analysis is a multi-factor model, created by Barra Inc., used to measure the overall risk associated with a security relative to the market. Barra Risk Factor Analysis incorporates over 40 data metrics, including earnings growth, share turnover and senior debt rating. The model then measures risk factors associated with three main components: industry risk, the risk from exposure to different investment themes and company-specific risk. An element that investors and portfolio managers scrutinize when evaluating the markets or portfolios is investment risk. Identifying and measuring investment risk is one of the most important steps taken when deciding what assets to invest in. This is because the level of risk taken determines the level of return that an asset or portfolio of assets will have at the end of a trading cycle. Consequently, one of the most widely accepted financial principles is the tradeoff between risk and return. One method that a portfolio manager might use to measure investment risk is evaluating the impact of a series of broad factors on the performance of various assets or securities. 
Using a factor model, the return-generating process for a security is driven by the presence of the various common fundamental factors and the asset's unique sensitivities to each factor. Since a few important factors can explain the risk and return expected on investment to a large degree, factor models can be used to evaluate how much of a portfolio's return is attributable to each common factor exposure. Factor models can be broken down into single-factor and multiple-factor models. One multi-factor model that can be used to measure portfolio risk is the Barra Risk Factor Analysis model. The Barra Risk Factor Analysis was pioneered by Barr Rosenberg, founder of Barra Inc., and is discussed at length in Grinold and Kahn (2000), Connor et al (2010) and Cariño et al (2010). It incorporates a number of factors in its model that can be used to predict and control risk. The multi-factor risk model uses a number of key fundamental factors that represent the features of an investment. Some of these factors include yield, earnings growth, volatility, liquidity, momentum, size, price-earnings ratio, leverage, and growth; factors which are used to describe the risk or returns of a portfolio or asset by moving from quantitative, but unspecified, factors to readily identifiable fundamental characteristics. The Barra Risk Factor Analysis model measures a security's relative risk with a single value-at-risk (VaR) number. This number represents a percentile rank between 0 and 100, with 0 being the least volatile and 100 being the most volatile, relative to the U.S. market. For instance, a security with a value-at-risk number of 80 is calculated to have a greater level of price volatility than 80% of securities in the market and its specific sector. So, if Amazon is assigned a VaR of 80, it means that its stock is more price volatile than 80% of the stock market or the sector in which the company operates.
### Fama-French Three-Factor Model

A final category of risk factors are style factors, which mimic investment styles such as investing in “small cap” companies or “high growth” companies, as in the Fama-French 3-factor model. In 1996, Fama and French observed that two classes of stocks have tended to do better than the market as a whole: (i) small caps and (ii) stocks with a high book-to-market ratio (B/P, customarily called value stocks, contrasted with growth stocks). They have thus identified three risk factors to describe stock returns.

|Factor|Idea|Symbol|Calculated|
|------|-|-|-|
|Market Risk Premium|additional return an investor expects from holding a risky market portfolio instead of risk-free assets|$R_m - R_f$|It is calculated as the monthly return of the CRSP value-weighted index less the risk free rate|
|Size Premium|historical tendency for the stocks of firms with smaller market capitalizations to outperform the stocks of firms with larger market capitalizations|$SMB$ (Small Minus Big, in terms of Market Cap)||
|Value Premium|outperformance of high book / market versus low book / market companies|$HML$ (High Minus Low, in terms of Book-to-Market)||

To compute those values, the stock universe considered is all NYSE, AMEX, and NASDAQ stocks for which they have ME for December of t-1 and June of t, and BE for t-1.

* SMB is a zero-investment portfolio that is long on small capitalization (cap) stocks and short on big cap stocks.
* HML is a zero-investment portfolio that is long on high book-to-market (B/M) stocks and short on low B/M stocks. Portfolios are formed on B/M at the end of each June using NYSE breakpoints. The BE used in June of year t is the book equity for the last fiscal year end in t-1. ME is price times shares outstanding at the end of December of t-1.
* BE < 0; bottom 30%, middle 40%, top 30%; quintiles; deciles.
* Firms with negative book equity are in only the BE < 0 portfolio.
The premiums are computed on a monthly basis in their methodology. They then added those two additional factors to CAPM to reflect a portfolio's exposure to these two classes: $$ R_i - R_f = \alpha^{FF} + \beta_{mkt} * (R_m - R_f)_t + \beta_{SMB} * SMB_t + \beta_{HML} * HML_t + \epsilon_t$$ The market risk, $\beta$ which is analogous to the classical $\beta$ (but not equal to it, since there are now two additional factors to do some of the work) Once SMB and HML are defined, the corresponding beta coefficients are determined by linear regressions and can take negative values as well as positive values. The intercept in this model i.e. $\alpha^{FF}$ is referred to as the "three-factor alpha" The more exposure our portfolio has on small caps and high book-to-value, the higher those coefficients (risk factors) will be, so the higher the expected return. They find that, except for the continuation of short-term returns, the anomalies largely disappear in a three-factor model. Their results are consistent with rational ICAPM or APT asset pricing, but they also consider irrational pricing and data problems as possible explanations. However, the size and book/market ratio themselves are not in the model. For this reason, there is academic debate about the meaning of the last two factors. ### Carhart Four Factor Model In 1997, Mark Carhart extended the Fama-French three-factor model to include a momentum factor, UMD (short for Up Minus Down, or MOM, short for monthly momentum), from Jegadeesh and Titman's paper. https://alphaarchitect.com/2016/10/14/how-to-measure-momentum/ French Fama: https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/Data_Library/det_mom_factor_daily.html Carhart: https://breakingdownfinance.com/finance-topics/equity-valuation/carhart-4-factor-model/#:~:text=Carhart%204%20factor%20model%20equation&text=where%20Mkt%20is%20the%20return,easily%20be%20estimated%20using%20OLS. 
Momentum in a stock is described as the tendency for the stock price to continue rising if it is going up and to continue declining if it is going down. The MOM can be calculated by subtracting the equal weighted average of the lowest performing firms from the equal weighted average of the highest performing firms, lagged one month (Carhart, 1997). A stock is showing momentum if its prior 12-month average of returns is positive. Similar to the three factor model, the momentum factor is defined by a self-financing portfolio of (long positive momentum)+(short negative momentum). Momentum strategies continue to be popular in financial markets, such that financial analysts incorporate the 52-week price high/low in their Buy/Sell recommendations.

$$ R_i - R_f = \alpha^C + \beta_{mkt} * (R_m - R_f)_t + \beta_{SMB} * SMB_t + \beta_{HML} * HML_t + \beta_{UMD} * UMD_t + \epsilon_t$$

Here, UMD is the fourth risk factor, representing the monthly premium on winners minus losers. UMD is a zero-cost portfolio that is long previous 12-month return winners and short previous 12-month loser stocks. The intercept in this model is referred to as the "four-factor alpha".

### Fama-French Five-Factor Model

In 2015, Fama and French extended the model, adding a further two factors -- profitability and investment. Defined analogously to the HML factor, the profitability factor (RMW) is the difference between the returns of firms with robust (high) and weak (low) operating profitability; and the investment factor (CMA) is the difference between the returns of firms that invest conservatively and firms that invest aggressively. In the US (1963-2013), adding these two factors makes the HML factor redundant since the time series of HML returns are completely explained by the other four factors (most notably CMA which has a -0.7 correlation with HML).
Whilst the model still fails the Gibbons, Ross & Shanken (1989) test, which tests whether the factors fully explain the expected returns of various portfolios, the test suggests that the five-factor model improves the explanatory power of the returns of stocks relative to the three-factor model. The failure to fully explain all portfolios tested is driven by the particularly poor performance (i.e. large negative five-factor alpha) of portfolios made up of small firms that invest a lot despite low profitability (i.e. portfolios whose returns covary positively with SMB and negatively with RMW and CMA). If the model fully explains stock returns, the estimated alpha should be statistically indistinguishable from zero. ## Risk-Premia Factors In this section, we will analyze and improve on the factors that were described in those seminal works. ### Value Factor Value: composite of trailing cash-flow yield, earnings yield and country relative sales to price ratio ### Size Factor Size: full market capitalization A strong large-cap bias. ... We are biased towards companies displaying positive earnings and price momentum. ### Momentum Factor Momentum: residual Sharpe ratio ### Quality Factor Quality: composite of profitability (return on assets), efficiency (change in asset turnover), earnings quality (accruals) & leverage ### Volatility Factor Volatility: standard deviation of 5 years of weekly (wed/wed) local total returns ### Liquidity Factor Liquidity: Amihud ratio – median ratio of absolute daily return to daily traded value over the previous year ### Dividend Yield Factor ### Growth Factor It should be noted that Growth in general has been shown to have strong mean reverting tendencies. What this means is that strong growth stocks often become moderate growth stocks. We caution investors from getting too optimistic about Growth forecasts in general. 
Our opinion is that it makes more sense to focus on Quality than Growth - academia and practitioner experience suggests it is more predictive of future growth. ## Combining Alphas https://www.quantopian.com/posts/alphalens-a-new-tool-for-analyzing-alpha-factors#:~:text=Alphalens%20is%20a%20Python%20package,of%20information%20and%20future%20returns. Alpha factors express a predictive relationship between some given set of information and future returns. By applying this relationship to multiple stocks we can hope to generate an alpha signal and trade off of it. a. **Single Alpha Factor Modeling:** define and evaluate individual expressions which rank the cross section of equities in your universe. The following information can tell you if the alpha factor you found is predictive; whether you have found an "edge." These statistics cover: * Returns Analysis * Information Coefficient Analysis * Turnover Analysis * Sector Analysis They studied each company's ratio on an absolute basis against the entire universe (e.g. large caps in Europe), against their local market index (e.g. FTSE 100) and against their sector. They also tested each ratio relative to each stock and sector's historical average in what looks like a very comprehensive backtest. To figure out which ratios to include in the end portfolio, they picked the ratios that generated portfolios of stocks with the highest Sharpe Ratios - essentially these are the portfolios offering the highest returns for the least volatility. Also maybe avoid picking the ones that are too correlated *Predictability window*: What's fascinating about momentum strategies versus value strategies is that they work on completely different timeframes. The winning value ratios all work best on 12-24 month time horizons, whereas the momentum ratios work best on 3-6 month time horizons. 
As a result, the optimal blended portfolios have holding periods of 12 months - a boon for individual investors who want to put this kind of strategy to work, but don't want to trade like amphetamine fueled schizophrenic rabbits. b. **Alpha Combination:** combine many single alphas into a final alpha which has stronger prediction power than the best single alpha. This is often due to the noise in each alpha being canceled out by noise in other alphas, allowing signal to come through. Beta is a measure of the returns (of the asset or portfolio of assets) which are attributed to the market. The farther from zero, the more sensitive it is to the returns of the market (> 0 is same way, < 0 is opposite way). Alpha is a measure of the returns which are NOT attributed to the market. It's your edge. When we combine criteria like in the paradigms above, we do not know how each criteria (or signal), singled out, provides to the alpha. And we don't know: * Just because you run a backtest and is profitable, you don't know whether the signal you're trading on has any alpha, you might have just gotten lucky. * Alternatively, if your backtest is not profitable, that doesn't mean your signal doesn't have alpha. It means your strategy was bad. Maybe other factors in the strategy helped making it good or bad i.e. weight allocation, rebalancing etc., so better to isolate the concern of alpha factors and test them separately. After that, we find those that complement each other i.e. a factors that underperformed compared to another that outperformed during a certain period of time and vice versa. To determine whether something is a factor, you should be able to rank stocks based on such factor, and allocate to a different portfolio, and observe monotically decreasing returns. ### Value and Momentum Value + Momentum when paired alone is seen as one of the most successful stock market strategies. 
Societe Generale Quant team publish a 'WISE' investment strategy that significantly outperforms the market combining a simple summed score of value and momentum factors. The two strategies are complementary as value tends to prosper when momentum lags and vice versa. Given that the returns to value and momentum are fairly uncorrelated. ### Value and Quality Value + Quality when paired is very similar to Joel Greenblatt's approach in his famous book 'The Little Book that Beats the Market'. Our own backtests have shown that this approach is highly effective at generating market beating returns and is most similar to the approach of Warren Buffett and other investors who like to buy quality stocks when marked down. ### Quality and Momentum Quality + Momentum may ignore price, but also performs remarkably well. Many traders, such as Bill O'Neil, are willing to buy highly priced stocks as the real market leaders tend to sell at a premium. These are often stocks that are expensive for good reason - due to continued blistering share price growth, earnings growth and exceptional cashflow, profitability and financial results. These stocks can still be worth purchasing, though greater caution should be taken with any highly priced stock as the volatility can be high (e.g. if the company starts to disappoint).
github_jupyter
``` import glob import json import tarfile import pandas as pd import numpy as np import os as os from datetime import datetime from dateutil.relativedelta import relativedelta from gensim.models import Word2Vec from itertools import chain pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', 16) class CHFDataset: def __init__(self): self.emb = None def set_dataset(self, path, sample_size=None, additional_path=None): dataset = {} for json_patient in ExtractJsonFile(path, sample_size, additional_path): id = json_patient['entry'][0]['resource']['id'] dataset[id] = {} dataset[id]['codes'] = [] dataset[id]['chf'] = None for bundle in ExtractEntry(json_patient): if (bundle.get('encounter_type') == "IMP" or bundle.get('encounter_type') == "EMER" or bundle.get('encounter_type') == "AMB") \ and bundle['code'] == "Chronic congestive heart failure (disorder)": if dataset[id]['chf'] is None: dataset[id]['chf'] = bundle['start_date'] break dataset[id]['codes'].append([bundle['start_date'], bundle['code']]) self.emb = Embeddings(dataset) class MYOINFDataset: def __init__(self): self.emb = None def set_dataset(self, path, sample_size=None, additional_path=None): dataset = {} for json_patient in ExtractJsonFile(path, sample_size, additional_path): id = json_patient['entry'][0]['resource']['id'] dataset[id] = {} dataset[id]['codes'] = [] dataset[id]['myoinf'] = None for bundle in ExtractEntry(json_patient): if (bundle.get('encounter_type') == "IMP" or bundle.get('encounter_type') == "EMER" or bundle.get('encounter_type') == "AMB") \ and bundle['code'] == "Myocardial Infarction": if dataset[id]['myoinf'] is None: dataset[id]['myoinf'] = bundle['start_date'] break dataset[id]['codes'].append([bundle['start_date'], bundle['code']]) self.emb = Embeddings(dataset) class ExtractJsonFile: def __init__(self, path, sample_size=None, additional_path=None): self.path = path self.sample_size = sample_size self.additional_path = additional_path def __iter__(self): 
fhircodes = {} counter, n = 0, 1 with tarfile.open(self.path, "r:gz") as tfile: for member in tfile: if (member.isdir()): continue yield pd.read_json(tfile.extractfile(member)) counter = counter+1 if (counter == n): print("Processed " + str(n) + " files") n = n*2 if (self.sample_size == counter): break # Read an equal number of files to that of the target path additional_path_size = counter counter, n = 0, 1 if self.additional_path is not None: with tarfile.open(self.additional_path, "r:gz") as tfile: for member in tfile: if (member.isdir()): continue yield pd.read_json(tfile.extractfile(member)) counter = counter+1 if (counter == n): print("Processed " + str(n + additional_path_size) + " files") n = n*2 if (additional_path_size == counter): break class ExtractEntry: def __init__(self, json_patient): self.json_patient = json_patient self.filter = set(["Allergic disorder initial assessment", "Encounter for 'check-up'", "Encounter for check up (procedure)", "Encounter for symptom", "Encounter for problem", "Encounter for problem (procedure)", "Emergency Encounter", "Emergency room admission (procedure)", "General examination of patient (procedure)", "Outpatient procedure", "Urgent care clinic (procedure)", "Well child visit (procedure)", "Medication Reconciliation (procedure)"]) def __iter__(self): for entry in self.json_patient['entry']: codes = [] end_date, encounter_type = None, None resource_type = entry['resource']['resourceType'] if resource_type == "Encounter": start_date = entry['resource']['period']['start'][0:10] end_date = entry['resource']['period']['end'][0:10] encounter_type = entry['resource']['class']['code'] try: codes.append(entry['resource']['reasonCode'][0]['coding'][0]['display']) except: codes.append(entry['resource']['type'][0]['coding'][0]['display']) if resource_type == "Procedure": start_date = entry['resource']['performedPeriod']['start'][0:10] codes.append(entry['resource']['code']['coding'][0]['display']) if resource_type == "Condition": 
start_date = entry['resource']['onsetDateTime'][0:10] codes.append(entry['resource']['code']['coding'][0]['display']) if resource_type == "CarePlan": start_date = entry['resource']['period']['start'][0:10] try: activity = entry['resource']['activity'] except: continue for plan in activity: codes.append(plan['detail']['code']['coding'][0]['display']) for code in codes: if code not in self.filter: yield {'start_date':start_date, 'end_date':end_date, 'code':code, 'encounter_type':encounter_type} class Embeddings: def __init__(self, dataset): self.dataset = dataset self.onehot = None self.wordemb = None def generate_onehot(self, label, window_range, predict_range, step_size=0): df_dataset = pd.DataFrame.from_dict(self.dataset).T.reset_index().rename(columns={'index':'id'}) self.onehot = [] offset = step_size + predict_range for df, id in self.get_df_from_range(df_dataset, label, window_range, offset): # Flatten df['codes'] into array[month][code] = 1 arr_codes = [] for codes in df['codes']: code_dict = {} for code in codes: code_dict[code] = 1 arr_codes.append(code_dict) # Add flattened codes to df df = df.join(pd.DataFrame.from_dict(arr_codes).fillna(0)).drop(columns=['codes']) # Add label if (self.dataset[id][label] is not None): df[label] = 1 else: df[label] = 0 self.onehot.append(df) self.normalize(label) def generate_wordemb(self, label, window_range, predict_range, step_size=0): df_dataset = pd.DataFrame.from_dict(self.dataset).T.reset_index().rename(columns={'index':'id'}) dfs, master_list = [], [] offset = step_size + predict_range for df, id in self.get_df_from_range(df_dataset, label, window_range, offset): master_list.append(list(chain.from_iterable(df["codes"]))) if (self.dataset[id][label] is not None): df[label] = 1 else: df[label] = 0 dfs.append(df) # Generate embeddings from master list master_embeddings = Word2Vec(master_list, size=100, window=2, min_count=1, workers=2, sg=1) # Average each month's embeddings per patient self.wordemb = [] for df in 
dfs: emb = [] for index, row in df.iterrows(): sum = None for code in row['codes']: if sum is None: sum = np.array(master_embeddings[code]) else: sum = np.add(sum, np.array(master_embeddings[code])) if sum is not None: emb.append(np.divide(sum, len(row['codes']))) else: # append 100 vector instead of empty tuple emb.append(()) self.wordemb.append(np.asarray((emb, df[label][0]))) def get_df_from_range(self, df, label, window_range, offset): # yields one patient's df at a time for index, row in df.iterrows(): id = row['id'] # Get specified range try: # via label end_range = pd.to_datetime(datetime.strptime(self.dataset[id][label], '%Y-%m-%d').date()) except: # via latest date try: end_range = pd.to_datetime(datetime.strptime(row['codes'][-1][0], '%Y-%m-%d').date()) except: continue end_range = end_range - relativedelta(months=offset) start_range = end_range - relativedelta(months=window_range) # Set DataFrame to range df = pd.DataFrame(row['codes']).rename(columns={0:'date', 1:'codes'}) try: df['date'] = pd.to_datetime(df['date']) except: # no codes exist between the start_range and end_range continue df = df[df['date'].between(start_range, end_range)].set_index('date') if len(df) == 0: continue # Group codes by month df = df.groupby(pd.Grouper(freq='M')) df = df.aggregate(lambda x: tuple(x)).reset_index() # Fill in missing months df.set_index('date', inplace=True) df = df.reindex(pd.date_range(start_range + relativedelta(months=1), end_range + relativedelta(months=1), freq='M')) df['codes'] = df['codes'].apply(lambda x: x if pd.notnull(x) else ()) # remove excess months while len(df) > window_range: df = df.shift(-1) df = df[:-1] df = df.reset_index(drop=True) yield df, id def normalize(self, training_label): all_columns = [] for frame in self.onehot: all_columns.extend(x for x in frame.columns.tolist() if not x in all_columns) final_frames = [] for df in self.onehot: cols = df.columns.tolist() cols.extend(x for x in all_columns if not x in cols) df = 
df.reindex(columns=sorted(cols, reverse=False), fill_value=0) col = df[training_label] # move training_label column to end of dateframe df.drop(labels=[training_label], axis=1,inplace = True) df[training_label] = col final_frames.append(df) self.onehot = final_frames def shuffleColumns(self, dfs, training_label, num_shuffled): dfs_shuffled = [] for _ in range(num_shuffled): df_shuffled = dfs.copy() # makes column labels the first row. (numpy only works with numbered columns so this preserves our label names) df_shuffled[0] = pd.DataFrame(np.vstack([df_shuffled[0].columns, df_shuffled[0]])) # randomize columns using numpy arr = df_shuffled[0].to_numpy() np.random.shuffle(arr.T) # convert back to pandas dataframe df_shuffled[0] = pd.DataFrame(arr) df_shuffled[0].columns = df_shuffled[0].iloc[0] df_shuffled[0] = df_shuffled[0].drop(df_shuffled[0].index[0]).reset_index(drop=True) # move training_label to end of dataframe col = df_shuffled[0][training_label] df_shuffled[0].drop(labels=[training_label], axis=1, inplace = True) df_shuffled[0][training_label] = col # reindex all dfs on df_shuffled[0] for i in range(len(df_shuffled)): df_shuffled[i] = df_shuffled[i].reindex(df_shuffled[0].columns, axis=1) dfs_shuffled.append(df_shuffled) return dfs_shuffled class OSUtil: @staticmethod def import_onehot_csv(path): dfs_onehot = [] for file in glob.glob(path + "/*"): try: dfs_onehot.append(pd.read_csv(file, index_col ='Unnamed: 0')) except Exception as e: print(e) continue return dfs_onehot @staticmethod def export_csv(path, dfs, overwrite=False): if (os.path.exists(path) == False): !mkdir $path if (overwrite == True): !rm $path"/*.csv" for i in range(len(dfs)): dfs[i].to_csv(path + '/patient' + str(i) + '.csv') @staticmethod def export_npy(path, nps, overwrite=False): if (os.path.exists(path) == False): !mkdir $path if (overwrite == True): !rm $path"/*.csv" for i in range(len(nps)): np.save(path + '/matrix' + str(i), nps[i]) @staticmethod def zip_folder(input_path, 
output_path): if ".zip" not in output_path: output_path = output_path + ".zip" !zip -r "$output_path" "$input_path" @staticmethod def unzip_folder(input_path, output_path="."): if ".zip" not in input_path: input_path = input_path + ".zip" !unzip "$input_path" -d "$output_path" @staticmethod def delete_folder(path): !rm -rf $path def sum_codes(onehot, label): df_no_label, df_label = [], [] for df in onehot: if df[label][0] == 0: df_no_label.append(df) else: df_label.append(df) sum_zero_label = df_no_label[0].sum() for i in range(1, len(df_no_label)): sum_zero_label = sum_zero_label.add(df_no_label[i].sum()) sum_one_label = df_label[0].sum() for i in range(1, len(df_label)): sum_one_label = sum_one_label.add(df_label[i].sum()) print("0 labels:", len(df_no_label), "\n1 labels:", len(df_label)) return sum_zero_label.sort_values(ascending=False), sum_one_label.sort_values(ascending=False) ``` ### Congestive Heart Failure: pre-processing & embedding generation ``` chf = CHFDataset() chf.set_dataset("/content/drive/Shared drives/Anthem Project/Data/synthea-data-general.tar.gz", sample_size=1000) chf.emb.generate_onehot(label="chf", window_range=24, predict_range=1) chf.emb.generate_wordemb(label="chf", window_range=24, predict_range=1) display(chf.emb.onehot[1]) display(chf.emb.wordemb[1]) sum_codes(chf.emb.onehot, 'chf') ``` ### Myocardial Infarction: pre-processing & embedding generation ``` myoinf = MYOINFDataset() myoinf.set_dataset("/content/drive/Shared drives/Anthem Project/Data/synthea-data-general.tar.gz", sample_size=1000) myoinf.emb.generate_onehot(label="myoinf", window_range=24, predict_range=1) myoinf.emb.generate_wordemb(label="myoinf", window_range=24, predict_range=1) ``` ### Export embeddings and compress ``` OSUtil.export_csv("csv-chf-onehot", chf.emb.onehot) OSUtil.export_npy("csv-chf-wordemb", chf.emb.wordemb) OSUtil.export_csv("csv-myoinf-onehot", myoinf.emb.onehot) OSUtil.export_npy("csv-myoinf-wordemb", myoinf.emb.wordemb) 
OSUtil.zip_folder('csv-chf-onehot', 'csv-chf-onehot') OSUtil.zip_folder('csv-chf-wordemb', 'csv-chf-wordemb') OSUtil.zip_folder('csv-myoinf-onehot', 'csv-myoinf-onehot') OSUtil.zip_folder('csv-myoinf-wordemb', 'csv-myoinf-wordemb') ```
github_jupyter
#### PYT-DS SAISOFT * [Overview 1](https://github.com/4dsolutions/Python5/blob/master/OverviewNotes_PYTDS.ipynb) * [Overview 3](https://github.com/4dsolutions/Python5/blob/master/OverviewNotes_PYTDS_3.ipynb) <a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/27963484878/in/album-72157693427665102/" title="Barry at Large"><img src="https://farm1.staticflickr.com/969/27963484878_b38f0db42a_m.jpg" width="240" height="180" alt="Barry at Large"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script> # DATA SCIENCE WITH PYTHON ## Where Have We Been, What Have We Seen? Data Science includes Data Management. This means we might call a DBA (Database Administrator) a kind of data scientist? Why not? Their speciality is efficiently warehousing data, meaning the same information is not redundantly scattered. In terms of rackspace and data center security, of course we want redundancy, but in databases the potential for data corruption increases exponentially with the number of places the same information must be kept up to date. If a person changes their legal name, you don't want to have to break your primary key, which should be based on something less mutable. Concepts of mutability versus immutability are important in data science. In consulting, I would often advertise spreadsheets as ideal for "what if" scenarios, but if the goal is to chronicle "what was" then the mutability of a spreedsheet becomes a liability. The bookkeeping community always encourages databases over spreadsheets when it comes to keeping a company or agency's books. DBAs also concern themselves with missing data. If the data is increasingly full of holes, that's a sign the database may no longer be loved. DBAs engage in load balancing, meaning they must give priority to services most in demand. However "what's in demand" may be a changing vista. 
```
import pandas as pd
import numpy as np
rng_years = pd.period_range('1/1/2000', '1/1/2018', freq='Y')
```

People needing to divide a fiscal year starting in July, into quarters, are in luck with ```pandas```. I've been looking for lunar year and other periodic progressions. The whole timeline thing still seems difficult, even with a proleptic Gregorian plus UTC timezones.

```
head_count = np.random.randint(10,35, size=19)
```

As usual, I'm recommending telling yourself a story, in this case about an exclusive party you've been hosting ever since 2000, all the way up to 2018. Once you get the interactive version of this Notebook, you'll be able to extend this record by as many more years as you want.

```
new_years_party = pd.DataFrame(head_count, index = rng_years, columns=["Attenders"])
```

DBAs who know SQL / noSQL, will find pandas, especially its ```inner``` ```outer``` ```left``` and ```right``` merge possibilities somewhat familiar. We learn about the ```set``` type through maths, through Python, and understand about unions and intersections, differences. We did a fair amount of practicing with merge, appreciating that pandas pays a lot of attention to the DataFrame labels, synchronizing along indexes and columns, creating NaN empty cells where needed. We're spared a lot of programming, and yet even so these patchings-together can become messy and disorganized. At least the steps are chronicled. That's why spreadsheets are not a good idea. You lose your audit trail. There's no good way to find and debug your mistakes. Keep the whole pipeline in view, from raw data sources, through [numerous cleaning and filtering steps](https://youtu.be/qvHXRuGPHl0). The linked Youtube is a good example: the data scientist vastly shrinks the data needed, by weeding out what's irrelevant. Data science is all about dismissing the irrelevant, which takes work, real energy.

```
new_years_party
```

What's the average number of party-goers over this nineteen-year period?
``` np.round(new_years_party.Attenders.mean()) ``` Might you also want the median and mode? Do you remember what those are? ``` new_years_party.Attenders.mode() ``` Now that seems strange. Isn't the mode of a column of numbers, a number? We're looking at the numbers that appear most often, the top six in the ranking. Surely there must be some tie breaking rule. ``` new_years_party.Attenders.median() ``` That's not years of age, lets remember, but the number clocked in at just after midnight, so still there for the beginning of the New Year. What other columns (features) were collected on these people? Do they know they were being surveilled? Would they recognize themselves, even if they saw this data? Or is this data totally made up. Come to think of it, we did use a randomizer now didn't we. <a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/16149955921/in/photolist-JdgRnt-JdgNxa-239WrJR-JdgXVD-JdgQJp-Dve8wV-JdgZax-DveefD-Dve7S8-JdgXdr-239W9Ea-239Wb5V-DveeFD-DvedJD-239WsoB-JdgYxF-239Wahn-DvedjR-JdgZJZ-225jXoh-239VXTF-Dve75X-JdgJ9R-JdgJGK-JdgKo4-JdgL42-JdgHrt-239VX7a-F2iuns-21K5vNc-F2it6E-F2irFq-F2isp9-F2iqYd-F2itHm-F2ipcN-F2ipLo-F2imu3-237fAbJ-F2inqG-F2iqgb-F2ikwb-239VKTZ-JdfMD8-239VJa8-239VJZz-239VHGV-QfwD7s-QHZTcw-qB7KeT" title="New Years Ball"><img src="https://farm8.staticflickr.com/7498/16149955921_514a579c6b_n.jpg" width="320" height="303" alt="New Years Ball"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script>
github_jupyter
<!--NOTEBOOK_HEADER--> *This notebook contains material from [cbe61622](https://jckantor.github.io/cbe61622); content is available [on Github](https://github.com/jckantor/cbe61622.git).* <!--NAVIGATION--> < [A.4 Scheduling Real-Time Events with Simpy](https://jckantor.github.io/cbe61622/A.04-Scheduling-Real-Time-Events-with-Simpy.html) | [Contents](toc.html) | [B.1 Particle Command Line Interface](https://jckantor.github.io/cbe61622/B.01-Particle_CLI.html) ><p><a href="https://colab.research.google.com/github/jckantor/cbe61622/blob/master/docs/B.00-Working_with_Particle.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/cbe61622/B.00-Working_with_Particle.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a> # B.0 Working with Particle ## B.0.1 Why Particle? Many notebooks in this repository demonstrate the use of devices from [Particle.io](particle.io), a company that produces devices and services for the *Internet of Things* (IoT) industry. The IoT is evolving quickly with many participants, ranging from [Arduino.cc](arduino.cc) founded in 2005 for purpose of bringing to market new devices based on easy-to-use hardware and software, to long-established companies like Intel and Texas Instruments entering new markets, and to companies like [Particle founded in 2012](https://blog.particle.io/spark-is-now-particle/) intending to share in the growth of a new industry. There are other devices on the market that would support the educational purposes of these notebooks. The attractive features of Particle for this project include: * A complete cloud-based development platform. The development platform is also available as a command line interface that can be integrated with other services including, for example, Google Colaboratory. 
* A range of wireless device communications available including Bluetooth, Wifi, and cellular moden ## B.0.2 Particle Argon [hardware datasheet](https://docs.particle.io/datasheets/wi-fi/argon-datasheet/) [Device OS API](https://docs.particle.io/reference/device-os/firmware/argon/) ![](https://docs.particle.io/assets/images/argon/argon-top.png) <!--NAVIGATION--> < [A.4 Scheduling Real-Time Events with Simpy](https://jckantor.github.io/cbe61622/A.04-Scheduling-Real-Time-Events-with-Simpy.html) | [Contents](toc.html) | [B.1 Particle Command Line Interface](https://jckantor.github.io/cbe61622/B.01-Particle_CLI.html) ><p><a href="https://colab.research.google.com/github/jckantor/cbe61622/blob/master/docs/B.00-Working_with_Particle.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/cbe61622/B.00-Working_with_Particle.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
github_jupyter
import pandas as pd
import numpy as np


def update_signal(x):
    """Map a (prediction - price) difference to a breakout signal.

    Negative difference means the prediction sits below the price:
    a resistance breakout. Zero or positive means a support breakout.
    """
    if x < 0:
        return "Resistance_Breakout"
    else:
        return "Support_Breakout"


def add_recommendations(df):
    """Derive BUY/SELL/HOLD recommendations from consecutive signals.

    A Support->Resistance transition marks a SELL at the second row,
    Resistance->Support marks a BUY; everything else stays HOLD.
    Mutates and returns *df* with a 'Recommendation' column.
    """
    sig = df["Last_Updated_Signal"].tolist()
    recs = ["HOLD"] * len(df)
    for x in range(len(df) - 1):
        if sig[x] == "Support_Breakout" and sig[x + 1] == "Resistance_Breakout":
            recs[x + 1] = "SELL"
        elif sig[x] == "Resistance_Breakout" and sig[x + 1] == "Support_Breakout":
            recs[x + 1] = "BUY"
    df["Recommendation"] = recs
    return df


def backtest(transactions_df, amount=100000):
    """Run the buy/sell backtest over a BUY/SELL-only transaction frame.

    Each SELL row realizes the percent move since the previous row and
    compounds it onto the running amount (two rows back, i.e. the
    previous SELL). Returns (annotated copy of transactions_df,
    return percent, dollar profit/loss).

    Fixes vs. original: 'pct' is a float column (was initialized with
    the string "0"), and assignment uses plain columns instead of
    chained .iloc writes on a filtered view.
    """
    prices = transactions_df["price_avg"].tolist()
    recs = transactions_df["Recommendation"].tolist()
    pct = [0.0] * len(transactions_df)
    running = [0.0] * len(transactions_df)
    for x in range(len(transactions_df)):
        if recs[x] == "SELL":
            pct[x] = (prices[x] - prices[x - 1]) / prices[x - 1]
            if x <= 1:
                running[x] = (1 + pct[x]) * amount
            else:
                running[x] = (1 + pct[x]) * running[x - 2]
    out = transactions_df.copy()
    out["pct"] = pct
    out["runing_amount"] = running
    rtn_pct = running[-1] / amount * 100
    profit_loss = running[-1] - amount
    return out, rtn_pct, profit_loss


def main():
    # Read the data
    df_btc = pd.read_csv("/Users/mz195/BTC_price_db.csv")
    df_overall = pd.read_csv("/Users/mz195/overall_predictions.csv")
    merged = df_btc.merge(df_overall, on='time_')
    df_overall["price_avg"] = merged["price_avg"]
    # align predictions two steps back against observed prices
    df_overall['overall'] = df_overall['overall'].shift(periods=-2)
    df = df_overall.dropna().copy()
    df["diff"] = df["overall"] - df["price_avg"]
    df["Last_Updated_Signal"] = df["diff"].apply(update_signal)
    df = add_recommendations(df)

    transactions_df = df[(df['Recommendation'] == "BUY") | (df['Recommendation'] == "SELL")].copy()
    if len(transactions_df) > 3:
        if (transactions_df["Recommendation"].iloc[0] == "SELL"
                or transactions_df["Recommendation"].iloc[-1] == "BUY"):
            # NOTE(review): original drops the FIRST row even when the
            # trigger is a trailing BUY -- behavior kept as-is; confirm intent.
            transactions_df.drop(index=transactions_df.index[0], inplace=True)
    check = transactions_df["Recommendation"].iloc[[0, -1]]
    print(check)

    # Investment
    amount = 100000
    transactions_num = len(transactions_df)
    transactions_fees = 0.5  # maybe we need to use if statements here
    total_fees = transactions_num * transactions_fees  # not yet subtracted from P/L
    sell_email_notification = list(df.loc[df['Recommendation'] == "SELL"].index)
    buy_email_notification = list(df.loc[df['Recommendation'] == "BUY"].index)

    transactions_df, rtn_pct, profit_loss = backtest(transactions_df, amount)
    print(rtn_pct, "% \n", profit_loss, "\n")
    df[["price_avg", "overall"]].plot(figsize=(16, 9))


if __name__ == "__main__":
    main()
github_jupyter
[![img/pythonista.png](img/pythonista.png)](https://www.pythonista.io) # Introducción a *Plotnine*. [*Plotnine*](https://plotnine.readthedocs.io/en/stable/) es un proyecto que aprovecha la gramática de gráficas por capas desarrollada por Hadley Wickham, el creador de [*ggplot2*](https://ggplot2.tidyverse.org/), la popular herramienta de visualización de datos para *R*. Para conocer más sobre la teoría de la gramática de capas, es posible consultar la siguiente liga: http://vita.had.co.nz/papers/layered-grammar.html ``` !pip install plotnine from plotnine import ggplot, aes, geom_line, geom_smooth, geom_histogram, geom_col, theme_xkcd, theme, element_text, lims, stat_quantile import pandas as pd import numpy as np from datetime import datetime from typing import Any ``` ## Gramática de capas de un gráfico. La gramática de capas define una estructura de elementos que conforman un gráfico. * Datos y mapeo estético. * Objetos geométricos. * Escalas. * Especificación de faceta. * Transformaciones específicas. * Sistema de coordenadas. ### Sintaxis de la gramática. ## La función ```ggplot()```. ``` ggplot(data=<datos>, mapping=<estética>, <argumentos>) ``` ## La función ```aes()```. ## Funciones de geometría.
https://plotnine.readthedocs.io/en/stable/api.html#geoms ``` np.random.seed(23523889) arreglo_base = pd.DataFrame(np.random.normal(12, 25, 1000), columns=pd.Index(['observaciones'])) arreglo_base ggplot(data=arreglo_base) ggplot(data=arreglo_base, mapping=aes(x='observaciones')) + geom_histogram() (ggplot(data=arreglo_base, mapping=aes(x='observaciones')) + geom_histogram(bins=10, fill='yellow', color="orange")) histograma = pd.DataFrame(np.histogram(arreglo_base, bins=13)).T histograma.columns = pd.Index(['frecuencias','rangos']) ggplot(histograma, aes(x='rangos', y='frecuencias', fill='rangos')) + geom_col() ggplot(histograma, aes(x='rangos', y='frecuencias', fill='rangos')) + geom_col() casos = pd.read_csv('data/casos_confirmados.csv').set_index('nombre') entidades = casos.T[2:].reset_index() cols = entidades.columns.to_list() cols[0] = 'fechas' entidades.columns = pd.Index(cols) entidades['fechas'] = pd.to_datetime(entidades['fechas'],infer_datetime_format=True) entidades (ggplot(entidades, aes(x='fechas', y='Nacional')) + geom_line() + geom_smooth(color='red')) (ggplot(entidades, aes(x='fechas', y='Nacional')) + geom_line() + geom_smooth(span=0.07, color='red')) (ggplot(entidades, aes(x='fechas', y='Nacional')) + geom_line() + geom_smooth(span=0.15, color='blue') + theme_xkcd()) data = entidades.iloc[365].to_frame()[1:-1].reset_index() data.columns = pd.Index(['Entidad', 'Casos']) data.info() (ggplot(data, aes(x='Entidad', y='Casos', fill='Casos')) + geom_col() + theme(axis_text_x=element_text(rotation=90, hjust=0.5))) data['Casos'] = data['Casos'].astype(int) (ggplot(data, aes(x='Entidad', y='Casos', fill='Casos')) + geom_col() + theme(axis_text_x=element_text(rotation=90, hjust=0.5))) ``` <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" 
href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p> <p style="text-align: center">&copy; José Luis Chiquete Valdivieso. 2021.</p>
github_jupyter
# Day 7
#
# Problem 1: find the horizontal position that minimises the crabs' total
# fuel, where moving one position costs one fuel.
#
# Approach:
#   1. Read the crabs' initial positions as a list of ints.
#   2. For a candidate target x, each crab burns abs(pos - x) fuel.
#   3. The optimum lies between min(pos) and max(pos); try every candidate
#      in that range and keep the smallest total.

import numpy as np


def min_fuel(input):
    """Return the minimum total fuel with constant (1 per step) move cost.

    *input* is the path of a comma-separated file of integer positions.

    Generalised vs. the original: candidate targets now run from
    min(position) to max(position), so the file no longer has to contain
    position 0 (the old version printed a warning and returned ()).
    Returns an int instead of a numpy float; numerically identical.
    """
    crab_pos = np.loadtxt(input, delimiter=',', dtype='int')
    lo, hi = int(crab_pos.min()), int(crab_pos.max())
    return min(int(np.abs(crab_pos - t).sum()) for t in range(lo, hi + 1))


# Problem 2: the n-th step now costs n fuel, so moving d positions costs
# 1 + 2 + ... + d = d*(d+1)/2 (the triangular number of d).

def min_fuel_incremental(input):
    """Return the minimum total fuel with triangular (1+2+...+d) step cost.

    Uses integer arithmetic (d*(d+1)//2) so the result is an exact int;
    the original used /2 and returned a float.
    """
    crab_pos = np.loadtxt(input, delimiter=',', dtype='int')
    lo, hi = int(crab_pos.min()), int(crab_pos.max())
    best = None
    for t in range(lo, hi + 1):
        d = np.abs(crab_pos - t)
        total = int((d * (d + 1) // 2).sum())  # sum of triangular costs
        if best is None or total < best:
            best = total
    return best


if __name__ == "__main__":
    # Original notebook exploration, kept as a runnable demo.
    print(min_fuel('example.txt'))
    print(min_fuel('input.txt'))
    print(min_fuel_incremental('example.txt'))
    print(min_fuel_incremental('input.txt'))

# Self-discussion: others solved this much faster -- for problem 1 the
# optimum target is the median of the positions (problem 2 is near the mean).
github_jupyter
``` import numpy as np import pandas as pd import torch import torchvision from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from matplotlib import pyplot as plt %matplotlib inline from scipy.stats import entropy from google.colab import drive drive.mount('/content/drive') path="/content/drive/MyDrive/Research/alternate_minimisation/" name="_50_50_10runs_entropy" # mu1 = np.array([3,3,3,3,0]) # sigma1 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu2 = np.array([4,4,4,4,0]) # sigma2 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu3 = np.array([10,5,5,10,0]) # sigma3 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu4 = np.array([-10,-10,-10,-10,0]) # sigma4 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu5 = np.array([-21,4,4,-21,0]) # sigma5 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu6 = np.array([-10,18,18,-10,0]) # sigma6 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu7 = np.array([4,20,4,20,0]) # sigma7 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu8 = np.array([4,-20,-20,4,0]) # sigma8 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu9 = np.array([20,20,20,20,0]) # sigma9 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu10 = np.array([20,-10,-10,20,0]) # sigma10 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # sample1 = np.random.multivariate_normal(mean=mu1,cov= sigma1,size=500) # sample2 = np.random.multivariate_normal(mean=mu2,cov= sigma2,size=500) # sample3 = np.random.multivariate_normal(mean=mu3,cov= sigma3,size=500) # sample4 = np.random.multivariate_normal(mean=mu4,cov= sigma4,size=500) # sample5 = 
np.random.multivariate_normal(mean=mu5,cov= sigma5,size=500) # sample6 = np.random.multivariate_normal(mean=mu6,cov= sigma6,size=500) # sample7 = np.random.multivariate_normal(mean=mu7,cov= sigma7,size=500) # sample8 = np.random.multivariate_normal(mean=mu8,cov= sigma8,size=500) # sample9 = np.random.multivariate_normal(mean=mu9,cov= sigma9,size=500) # sample10 = np.random.multivariate_normal(mean=mu10,cov= sigma10,size=500) # X = np.concatenate((sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10),axis=0) # Y = np.concatenate((np.zeros((500,1)),np.ones((500,1)),2*np.ones((500,1)),3*np.ones((500,1)),4*np.ones((500,1)), # 5*np.ones((500,1)),6*np.ones((500,1)),7*np.ones((500,1)),8*np.ones((500,1)),9*np.ones((500,1))),axis=0).astype(int) # print(X.shape,Y.shape) # # plt.scatter(sample1[:,0],sample1[:,1],label="class_0") # # plt.scatter(sample2[:,0],sample2[:,1],label="class_1") # # plt.scatter(sample3[:,0],sample3[:,1],label="class_2") # # plt.scatter(sample4[:,0],sample4[:,1],label="class_3") # # plt.scatter(sample5[:,0],sample5[:,1],label="class_4") # # plt.scatter(sample6[:,0],sample6[:,1],label="class_5") # # plt.scatter(sample7[:,0],sample7[:,1],label="class_6") # # plt.scatter(sample8[:,0],sample8[:,1],label="class_7") # # plt.scatter(sample9[:,0],sample9[:,1],label="class_8") # # plt.scatter(sample10[:,0],sample10[:,1],label="class_9") # # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') # class SyntheticDataset(Dataset): # """MosaicDataset dataset.""" # def __init__(self, x, y): # """ # Args: # csv_file (string): Path to the csv file with annotations. # root_dir (string): Directory with all the images. # transform (callable, optional): Optional transform to be applied # on a sample. 
# """ # self.x = x # self.y = y # #self.fore_idx = fore_idx # def __len__(self): # return len(self.y) # def __getitem__(self, idx): # return self.x[idx] , self.y[idx] #, self.fore_idx[idx] # trainset = SyntheticDataset(X,Y) # # testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform) # classes = ('zero','one','two','three','four','five','six','seven','eight','nine') # foreground_classes = {'zero','one','two'} # fg_used = '012' # fg1, fg2, fg3 = 0,1,2 # all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'} # background_classes = all_classes - foreground_classes # background_classes # trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True) # dataiter = iter(trainloader) # background_data=[] # background_label=[] # foreground_data=[] # foreground_label=[] # batch_size=100 # for i in range(50): # images, labels = dataiter.next() # for j in range(batch_size): # if(classes[labels[j]] in background_classes): # img = images[j].tolist() # background_data.append(img) # background_label.append(labels[j]) # else: # img = images[j].tolist() # foreground_data.append(img) # foreground_label.append(labels[j]) # foreground_data = torch.tensor(foreground_data) # foreground_label = torch.tensor(foreground_label) # background_data = torch.tensor(background_data) # background_label = torch.tensor(background_label) # def create_mosaic_img(bg_idx,fg_idx,fg): # """ # bg_idx : list of indexes of background_data[] to be used as background images in mosaic # fg_idx : index of image to be used as foreground image from foreground data # fg : at what position/index foreground image has to be stored out of 0-8 # """ # image_list=[] # j=0 # for i in range(9): # if i != fg: # image_list.append(background_data[bg_idx[j]]) # j+=1 # else: # image_list.append(foreground_data[fg_idx]) # label = foreground_label[fg_idx] - fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to 
store it as 0,1,2 # #image_list = np.concatenate(image_list ,axis=0) # image_list = torch.stack(image_list) # return image_list,label # desired_num = 3000 # mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images # fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9 # mosaic_label=[] # label of mosaic image = foreground class present in that mosaic # list_set_labels = [] # for i in range(desired_num): # set_idx = set() # np.random.seed(i) # bg_idx = np.random.randint(0,3500,8) # set_idx = set(background_label[bg_idx].tolist()) # fg_idx = np.random.randint(0,1500) # set_idx.add(foreground_label[fg_idx].item()) # fg = np.random.randint(0,9) # fore_idx.append(fg) # image_list,label = create_mosaic_img(bg_idx,fg_idx,fg) # mosaic_list_of_images.append(image_list) # mosaic_label.append(label) # list_set_labels.append(set_idx) # def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number): # """ # mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point # labels : mosaic_dataset labels # foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average # dataset_number : will help us to tell what ratio of foreground image to be taken. 
#                      for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
#     """
#     avg_image_dataset = []
#     for i in range(len(mosaic_dataset)):
#         img = torch.zeros([5], dtype=torch.float64)
#         for j in range(9):
#             if j == foreground_index[i]:
#                 img = img + mosaic_dataset[i][j]*dataset_number/9
#             else :
#                 img = img + mosaic_dataset[i][j]*(9-dataset_number)/(8*9)
#         avg_image_dataset.append(img)
#     return torch.stack(avg_image_dataset) , torch.stack(labels) , foreground_index


class MosaicDataset1(Dataset):
    """MosaicDataset dataset.

    Serves pre-built mosaics: each item is (mosaic tiles, label,
    index of the foreground tile inside the mosaic).
    """

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list: sequence of mosaic images (each a stack of 9 tiles).
            mosaic_label: label per mosaic (the foreground class).
            fore_idx: position (0-8) of the foreground tile per mosaic.
        """
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]


# data = [{"mosaic_list":mosaic_list_of_images, "mosaic_label": mosaic_label, "fore_idx":fore_idx}]
# np.save("mosaic_data.npy",data)

# Reload the previously generated mosaic dataset (saved under `path` on Drive).
data = np.load(path+"mosaic_data.npy",allow_pickle=True)
mosaic_list_of_images = data[0]["mosaic_list"]
mosaic_label = data[0]["mosaic_label"]
fore_idx = data[0]["fore_idx"]

batch = 250
msd = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
```

**Focus Net**

```
class Focus_deep(nn.Module):
    '''
    deep focus network averaged at zeroth layer
    input : elemental data
    '''
    def __init__(self,inputs,output,K,d):
        super(Focus_deep,self).__init__()
        self.inputs = inputs    # feature size of one elemental input
        self.output = output    # focus score size per element (1 here)
        self.K = K              # number of elements (tiles) per mosaic
        self.d = d              # dimension of each element
        self.linear1 = nn.Linear(self.inputs,50) #,self.output)
        self.linear2 = nn.Linear(50,self.output)

    def forward(self,z):
        # z: assumed (batch, K, d) stack of elemental inputs -- TODO confirm with caller
        batch = z.shape[0]
        x = torch.zeros([batch,self.K],dtype=torch.float64)   # focus scores per element
        y = torch.zeros([batch,self.d], dtype=torch.float64)  # attention-weighted average
        x,y = x.to("cuda"),y.to("cuda")
        for i in range(self.K):
            x[:,i] = self.helper(z[:,i] )[:,0]  # self.d*i:self.d*i+self.d
        log_x = F.log_softmax(x,dim=1) # log alpha to calculate entropy
        x = F.softmax(x,dim=1) # alphas
        x1 = x[:,0]
        for i in range(self.K):
            x1 = x[:,i]
            # accumulate alpha_i-weighted element into the average
            y = y+torch.mul(x1[:,None],z[:,i])  # self.d*i:self.d*i+self.d
        # returns (weighted average, alphas, log alphas)
        return y , x,log_x

    def helper(self,x):
        # small 2-layer MLP producing the raw (pre-softmax) focus score
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x
```

**Classification Net**

```
class Classification_deep(nn.Module):
    '''
    input : elemental data
    deep classification module data averaged at zeroth layer
    '''
    def __init__(self,inputs,output):
        super(Classification_deep,self).__init__()
        self.inputs = inputs
        self.output = output
        self.linear1 = nn.Linear(self.inputs,50)
        self.linear2 = nn.Linear(50,self.output)

    def forward(self,x):
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x


criterion = nn.CrossEntropyLoss()

def my_cross_entropy(x, y,alpha,log_alpha,k):
    """Combined objective: (1-k)*cross-entropy + k*entropy of the alphas.

    Returns (combined loss, plain CE loss, alpha-entropy) so callers can
    log the components separately.
    """
    # log_prob = -1.0 * F.log_softmax(x, 1)
    # loss = log_prob.gather(1, y.unsqueeze(1))
    # loss = loss.mean()
    loss = criterion(x,y)
    #alpha = torch.clamp(alpha,min=1e-10)
    b = -1.0* alpha * log_alpha           # elementwise -alpha*log(alpha)
    b = torch.mean(torch.sum(b,dim=1))    # mean entropy over the batch
    closs = loss
    entropy = b
    loss = (1-k)*loss + ((k)*b)
    return loss,closs,entropy
```

```
def calculate_attn_loss(dataloader,what,where,criter,k):
    """Evaluate combined/CE loss and alpha entropy over *dataloader*,
    and gather focus/prediction counters via analyse_data."""
    what.eval()
    where.eval()
    r_loss = 0
    cc_loss = 0
    cc_entropy = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels,fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            avg,alpha,log_alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            #ent = np.sum(entropy(alpha.cpu().detach().numpy(), base=2, axis=1))/batch
            # mx,_ = torch.max(alpha,1)
            # entropy = np.mean(-np.log2(mx.cpu().detach().numpy()))
            # print("entropy of batch", entropy)
            #loss = (1-k)*criter(outputs, labels) + k*ent
            loss,closs,entropy = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            r_loss += loss.item()
            cc_loss += closs.item()
            cc_entropy += entropy.item()
    alphas = np.concatenate(alphas,axis=0)
    pred = np.concatenate(pred,axis=0)
    lbls = np.concatenate(lbls,axis=0)
    fidices = np.concatenate(fidices,axis=0)
    #print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
    analysis = analyse_data(alphas,lbls,pred,fidices)
    # NOTE(review): divides by i, the index of the LAST batch (= num_batches-1),
    # not by the number of batches -- probably should be i+1 / len(dataloader).
    return r_loss/i,cc_loss/i,cc_entropy/i,analysis


def analyse_data(alphas,lbls,predicted,f_idx):
    '''
    analysis data is created here

    Counts, per sample: focused-true/false x predicted-true/false
    (ftpt/ffpt/ftpf/ffpf) plus how often the max alpha is >= 0.5
    (amth) or below (alth).
    '''
    batch = len(predicted)
    amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
    for j in range (batch):
        focus = np.argmax(alphas[j])
        if(alphas[j][focus] >= 0.5):
            amth +=1
        else:
            alth +=1
        if(focus == f_idx[j] and predicted[j] == lbls[j]):
            ftpt += 1
        elif(focus != f_idx[j] and predicted[j] == lbls[j]):
            ffpt +=1
        elif(focus == f_idx[j] and predicted[j] != lbls[j]):
            ftpf +=1
        elif(focus != f_idx[j] and predicted[j] != lbls[j]):
            ffpf +=1
    #print(sum(predicted==lbls),ftpt+ffpt)
    return [ftpt,ffpt,ftpf,ffpf,amth,alth]


# Alternating-minimisation training driver: `what` (classifier) and `where`
# (focus) are updated in alternating windows of `every_what_epoch` epochs.
number_runs = 10
full_analysis =[]
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
k = 0.005               # weight of the entropy term in the combined loss
every_what_epoch = 5    # epochs per alternation window
for n in range(number_runs):
    print("--"*40)
    # instantiate focus and classification Model
    torch.manual_seed(n)
    where = Focus_deep(5,1,9,5).double()
    torch.manual_seed(n)
    what = Classification_deep(5,3).double()
    where = where.to("cuda")
    what = what.to("cuda")
    # instantiate optimizer
    optimizer_where = optim.Adam(where.parameters(),lr =0.01)
    optimizer_what = optim.Adam(what.parameters(), lr=0.01)
    #criterion = nn.CrossEntropyLoss()
    acti = []
    analysis_data = []
    loss_curi = []
    epochs = 2000
    # calculate zeroth epoch loss and FTPT values
    running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k)
    loss_curi.append(running_loss)
    analysis_data.append(anlys_data)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    # training starts
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        what.train()
        where.train()
        # even windows train the classifier, odd windows train the focus net
        if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
            print(epoch+1,"updating what_net, where_net is freezed")
            print("--"*40)
        elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
            print(epoch+1,"updating where_net, what_net is freezed")
            print("--"*40)
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            inputs, labels,_ = data
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_where.zero_grad()
            optimizer_what.zero_grad()
            # forward + backward + optimize
            avg, alpha,log_alpha = where(inputs)
            outputs = what(avg)
            my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            # print statistics
            running_loss += my_loss.item()
            my_loss.backward()
            # step only the network scheduled for this window
            if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
                optimizer_what.step()
            elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
                optimizer_where.step()
            # optimizer_where.step()
            # optimizer_what.step()
            #break
        running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k)
        analysis_data.append(anls_data)
        print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy))
        loss_curi.append(running_loss)   #loss per epoch
        if running_loss<=0.001:
            break
    print('Finished Training run ' +str(n))
    #break
    analysis_data = np.array(analysis_data)
    # final-epoch counters as percentages (3000 samples / 30 = percent)
    FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
    full_analysis.append((epoch, analysis_data))
    # train-set accuracy of the final model for this run
    correct = 0
    total = 0
    with torch.no_grad():
        for data in train_loader:
            images, labels,_ = data
            images = images.double()
            images, labels = images.to("cuda"), labels.to("cuda")
            avg, alpha,log_alpha = where(images)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 3000 train images: %d %%' % ( 100 * correct / total))

a,b= full_analysis[0]
print(a)

# Plot per-run FTPT/FFPT/FTPF/FFPF trends and save them under `path`.
cnt=1
for epoch, analysis_data in full_analysis:
    analysis_data = np.array(analysis_data)
    # print("="*20+"run ",cnt,"="*20)
    plt.figure(figsize=(6,6))
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0],label="ftpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1],label="ffpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2],label="ftpf")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3],label="ffpf")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.title("Training trends for run "+str(cnt))
    plt.savefig(path+"50_50_10runs_entropy/every5/run"+str(cnt)+".png",bbox_inches="tight")
    plt.savefig(path+"50_50_10runs_entropy/every5/run"+str(cnt)+".pdf",bbox_inches="tight")
    cnt+=1

np.mean(np.array(FTPT_analysis),axis=0)
#array([87.85333333, 5.92 , 0. , 6.22666667])
FTPT_analysis.to_csv(path+"50_50_10runs_entropy/FTPT_analysis_every5"+name+".csv",index=False)
FTPT_analysis
```
github_jupyter
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier

# Load Dataset
training_set = pd.read_csv("../dataset/cleaned_movie_train.csv")
Y = training_set['sentiment'].values
X = training_set['review'].values
# stratified 80/20 split with a fixed seed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=42, stratify=Y)
print("No. of Training Examples: ", x_train.shape)
print("No. of Testing Examples: ", x_test.shape)

tf = TfidfVectorizer(min_df=10, max_df=0.95, use_idf=True)
# fit_transform fits the vocabulary AND returns the train matrix in one pass
# (the original discarded the fit_transform result and re-ran transform).
X_train = tf.fit_transform(x_train)
X_test = tf.transform(x_test)
# pickle.dump(tf, open('vectorizer2_clean_mix.sav', 'wb'))


# Evaluating models performance based on precision, recall and accuracy
def do_evaluation(predicted, actual, verbose=True):
    """Compute binary precision, recall, accuracy and F1 for *predicted* vs *actual*.

    Prints a one-line summary when *verbose* is True and returns the tuple
    (precision, recall, accuracy, f1score) so callers can use the values.
    All metrics now use the consistent (actual, predicted) argument order;
    F1 is symmetric under that swap, so reported values are unchanged.
    """
    precision = precision_score(actual, predicted)
    recall = recall_score(actual, predicted)
    accuracy = accuracy_score(actual, predicted)
    f1score = f1_score(actual, predicted)
    if verbose:
        print('"Evaluation"', '| Precision ==', round(precision * 100, 2),
              '| Recall ==', round(recall * 100, 2),
              '| Accuracy ==', round(accuracy * 100, 2),
              '| F1 score ==', round(f1score * 100, 2))
    return precision, recall, accuracy, f1score

# Training phase..
``` # Random Foreset Classifier knn = KNeighborsClassifier(n_neighbors=120,leaf_size=80, p=2) knn.fit(X_train,y_train) # Testing phase knn_pred=knn.predict(X_test) print("Accuracy: ",round(accuracy_score(y_test,knn_pred),3)) print ('{:.1%} of prediction are positive'.format(float(sum(knn_pred))/len(y_test))) print ('{:.1%} are actually positive'.format(float(sum(y_test))/len(y_test))) do_evaluation (knn_pred,y_test, verbose=True) pickle.dump(knn, open('knn2_clean_mix_0.788_8,34,2.sav', 'wb')) ``` # E valuate classifier performance(roc and auc curve) ``` def display_curve(nb_pred,name): #Calculating False Positive Rate ,True Positive Rate and threshold fpr_nb, tpr_nb, _ = roc_curve(y_test, nb_pred) #AUC is the percentage of the ROC plot that is underneath the curve: roc_auc_nb = auc(fpr_nb, tpr_nb) plt.title(f'Operating Characteristic for {name} Classifier') plt.plot(fpr_nb, tpr_nb, 'b', label = 'AUC = %0.2f' % roc_auc_nb) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() # Gaussian Naive Bayes Classifier display_curve(knn_pred,'KNN') ``` # Testing ``` # Load model knn = pickle.load(open('knn2_clean_mix_0.788_8,34,2.sav','rb')) tf = pickle.load(open('vectorizer2_clean_mix.sav','rb')) # Testing test_array = [ 'entertaining film follows rehearsal process ny production best taking seriously recognizable actors john glover gloria reubens david lansbury nice jobs main roles highlight hilarious scene murder banquo john elsen rehearsed probably entertaining film involved theatre anyone enjoys shakespeare enjoy film', 'could otherwise run mill mediocre film infidelity sixties subtle free love period top happily ever ending time ever feel sympathy diane lane anna paquin troublesome middle class care free life feel emasculated liev shrieber story line plods along slowly predictable pathetic conclusion thing interesting watchable film stunning diane lane 
topless hint occurs 30 minutes film fast forward part skip rest', 'cosimo luis guzmán told prison perfectsoon five guys organizing crime five guys little brain capacity brothers anthony joe russo directors welcome collinwood 2002 crime comedy often funny cannot help laughing everything goes wrong guys great actors playing characters william macy plays riley isaiah washington leon sam rockwell pero michael jeter toto andy davoli basil gabrielle union plays love interest michelle jennifer esposito plays pero love interest carmela george clooney also producer plays jerzy tattooed guy wheelchair highly entertaining flick certainly recommend', 'summer full blockbusters comebacks woe find film could sit enjoy case want read page spoilers sum mature ella enchanted questionably violent parts plenty death handful scenes little blood small children try overly corny overstep bounds think bit serious bit magical princess bride close perhaps prodigious movie goer others maybe twice month feel active also huge sci fi fantasy fan get bored remade repetitive story lines films flash filling faster count 10 film diamond rough end august tired enough fractured expectations big hits averted seeing bourne ultimatum favor stardust hopes thoroughly muddied transformers fiction addiction previews stardust seemed appealing certainly wary many others utterly surprised gone thinking see another generic fantasy movie clichéd breakfast fooled definitely fairy tale indeed witches magic utterly requires suspension disbelief refreshing thing found based anything seen read past 15 years actually really good movie unlike 90 movies seem persistently appear like thorns side perhaps sign hollywood running ideas could read book year two years movie would another epic fantasy tale likes lotr rest says nyt doubt stardust bolted seat jam packed action every turn sweating bullets plot hook plot hook threatening tear dramatic tension apart echo throughout theater loud boom even use enormous blasts sound 
grab attention happening screen transformers looking trying show latest cgi techniques offend intelligence dimwitted dialogs story lines simple enough could figured 3rd grade boy hate watched watched enjoyed refreshingly creative storyline unfold eyes sure may known going happen throughout film makes forget even made heart twinge parts important aspect noticed left theater feeling better would gone truly gem much slush summer many remakes films fell short expectations like cold sweet cup tea cap hard work would done sitting others trying come money worth probably everyone favor enjoy fantasy films stand test time alone princess bride black cauldron dark crystal etc really see movie little diamond finding way dvd collection moment hits stores trust simply wonderful', 'best movie ever seen maybe live area sweden movie tells truth believe criticizes honors lifestyle dalarna producer wants people watch movie opened minded care closest friends relatives live another small village anywhere sweden another country probably also recognize much movie thank maria blom', 'story deals jet li fight oldfriends one problem friends superfighters film filled blood super action best stunts forever lau ching wan great co actor course movie typical hk fun love germany black mask uncut', 'emotional impact movie defies words elegant subtle beautiful tragic rolled two hours smith matures acting ability full range knew saw pursuit happiness thought must fluke blockbuster top actor smith performances movies portray whole dimension smith refinement talent selectivity scripts sure view differently seven pounds one movies order fully enjoy essence suspend belief watch plot watch fragile condition human heart literally metaphorically story human guilt atonement love sacrifice', 'oh man want give internal crow robot real workout movie pop ol vcr potential cut lines film endless minor spoilers ahead hey really care film quality spoiled traci girl problem psychology developed names child develops 
sexual crush opposite sex parent girl seems one sex one think term might mother dana played rosanna arquette whose cute overbite neo flowerchild sexuality luscious figure makes forgive number bad movies unsympathetic characters dana clueless daughter conduct seems competing gold medal olympic indulgent mother competition possible dana misses traci murderous streak truth told traci seems criminal skills hamster script dictates manages pull kind body count particularly hilarious note movie character carmen mexican maid described dana around long like one family although dresses director thought would say fell tomato truck guadalajara carmen wise traci scheming might also wear sign saying hey next victim sure enough traci confronts carmen carmen making way back mass bops one slightly angled lug wrenches car manufacturers put next spare bad joke rather suspect real life things useless murder weapon changing tire another sequence arquette wears flimsy dress vineyard cloudy skies talking owner cut another flimsy dress sunny skies talking owner brother cut wearing first dress first location cloudy skies supposed later get picture talking really bad directing skin expect much although traci nice couple bikinis looking trash wallow 8 anybody else', 'life time little richard told little richard produced directed little richard one sided one songs biography even docudrama good writing great energy outstanding leading actor playing richard music little richard rocks tight lipsync every song movie covers early childhood carrys thru formative years music wild success richard throwing away praise lord tied together well obvious comeback 1962 manages stay away idea little richard discovered beatles opened main objection outrageous counter cultural behavior underplayed get feel audience experienced time energy still come across full force seemed tame compared remember time best scenes richard getting jilted lucille writing song strip bikini shorts performing make point decent place 
change gotten bronze liberace richard use refer interviews story trust saw perform couple months ago still flirts pretty white boys giving one particularly good dancer audience headband nearly 68 still going strong recommend movie concert v appearance find little richard always', 'script weak enough character arcs make care one bit characters happens script way talky enough gore action even call slow paced story gets point want everyone shut die quickly possible listen talk muted stiff dialogue technical note music mix way high makes hard understand said times could called blessing overall story could better told short film running time 30 minutes obvious face homages sam raimi evil dead would good subtle seem like bald faced rip mon kind 35mm budget best could done still cinematography lighting design shots well done indeed', 'savage island raw savagery scare hell trust boy estranged savage family run city slicker tourists pa savage wants revenge stop nothing gets real horror film truly wonderful horror moments also negative review clearly comes someone lacks proper knowledge film filmmakers chose lighting camera work order reflect dark murky egdy mood story words obtain certain aesthetic fact film several horror film festival awards', 'docteur petiot starring michel serrault brutal yet preys weakest amidst populace imagery cinematography superb lend additional macabre feeling complex story perfect psychopath seductive altruistic intelligent caring calculating murderous movie certain forgotten soon viewer kudos mr serrault chilling portrayal', 'one favourite flicks unlike weak elvira stranded unfamiliar town death good witch elviras aunt morgana inherits ruby ring extremely powerful sought bad warlock uncle befriends four characters inadvertently helps grow throughout movie dog tow show uncle wicked witch west elvira realises strength within ends defeating end gets sent towns folk winning hearts finally gets destination las vegas dorothy home kansas many 
references made wizard oz throughout movie uncle quote lines relevant parallel characters elvira youe must aunt em must uncle remus place like home place like home bad uncle vinny get pretty little dog sign elvira passes first road trip mentions state kansas aside fact one sequences ripped um mean inspired flashdance pure genius roll around laughing titty twirling end 80 las vegas show got camp bone body movie cult camp classic', 'oscar nominations zero win yet understandlike done halle berry denzel washington whoopi oprah margaret avery danny glover etc amazing curious get scripts discussions oscars year go shoulda would coulda category judges amazing book true alice walker style writing way seeming like exaggerating characters glad screen adaptation took things cinematography amazing african scenes live much desired african part book supposed set liberia somewhere west africa oh steven spielberg thinks world dumb cannot think africa outside safaris yes complimentary zebra wildlife scene know none west africa get people speak swahili west africa speaks swahili get way story amazing film making world classic yes watch soul needs rejuvenation', 'kurt thomas stars jonathan cabot ninjas stand chance especially since cabot gymnast taken whole gymkata one helluva bad movie atrocious acting god awful script really incompetent directing make quality human standards however movie terrible becomes really really funny mean dialog know outsleep ha add mock value gymkata obtains besides wisely movie hero gymnast finds things swing heat moment', 'film pretty good big fan baseball glover joseph gordon levitt brenda fricker christopher lloyd tony danza milton davis jr brought variety talented actors understanding sport plot believable love message william dear guys put together great movie sports films revolve around true stories events often work well film hits 10 perfectness scale even though minor mistakes', 'warm funny film much vein works almodovar sure 10 year cannot 
understand readers found sick perverted would willing let 10 year old son play part sure spanish cinema often quite sexual open healthy way leaves viewer without sense voyeurism kink think northern european types attitude would much better result liberal attitude also seen hilarious fartman maurice character lover says people embarrassed farting turn art form', 'although great film something compelling memorable like never forgotten story ridiculously cumbersome title see opportunity feel like voyeur small town life evolves decades film one brings human face historical drama early twentieth century progress engaging enough young viewer memorable enough older one furthermore easy like characters watch passage time', 'movie distinct albeit brutish rough humanity borderline depravity zippy like terrorizing woman train semi pitiful vulnerability lurks never far away dewaere sucks breasts like baby blier cuts away scene depardieu may rape dewaere never sure explicitly read manifestly homoerotic aspect relationship either way incident start relative humanization movie could certainly read pro gay although could likely read pro anything want movie many objectionable scenes points sexual politics probably best taken general cartoon foibles sexes making mockery whole notion sensitivity honesty hitting numerous points possible profundity basis fire enough shots bound hit', 'one remarkable sci fi movies millennium movie incredible establishes new standard f movies hail kill', 'care peopl vote movi bad want truth good thing movi realli get one', 'never realli understood controversi hype provok social drama could ever experi yeah right might littl shock mayb often see someon get shot ars weak pointless plot sure think much bais moi anoth one blame everyth go wrong societi film gener convinc 99 peopl function perfectli well societi would blame exact societi vile hopeless act two derang nymph girl two main charact miser life introduc separ flash nadin kill roommat manu shot 
brother two meet abandon train station late night decid travel around franc togeth leav trail sex blood behind wherev made stop although constantli expos pornographi violenc film bore sit like girl indic time dialogu lame peopl run kill uninterest peopl want make porno movi fine pleas pretend art hous film make leav swear hip camera work see arous pornographi cool soundtrack though', 'sweet entertain tale young find work retir eccentr tragic actress well act especi juli walter rupert grint play role teenag boy well show talent last longer harri potter seri film laura linney play ruthlessli strict mother without hint redempt room like film entertain film made well british style like keep mum calendar girl', 'first mention realli enjoy skin man peach hip girl although owe debt tarentino pulp fiction ishii cast task carri stori entir film crackl energi scene asano tadanobu gashuin tatsuya particularli engag action intrigu bizarr character enough sex keep thing interest utterli unpredict stori line certain amount anticip optim began watch parti 7 enthusiasm certainli piqu open credit left wife actual stun dynam excit mix anim live action work brilliant actual movi start actual much start sort shuffl side door stand fumbl pocket look uncomfort entir film take place three room one futurist voyeur paradis borrow bit shark skin man anoth travel agent offic third far use seedi hotel room room cast seven charact meet approxim noth realli stranger talk film one time favorit dinner andr talkiest talk film dinner andr far excit two middl age men discuss life dinner key andr gregori wallac shawn tell interest stori cast parti 7 liter whine entir film ye realli ye realli realli realli ye realli get idea hope wish direct parti 7 unbeliev unengag film flimsiest plot money stolen yakuza like shark skin man accompani almost action interest dialog charact larg uninterest ishii took throwaway convers moment tarentino film built entir film around tarentino convers alway intern logic wit 
call royal chees dialog duller imagin brief hilari cameo gashuin alway marvel low key perform awesom asano tadanobu would given parti 7 singl star realli chore make way', 'argentinian music poet film feel music repeat world time countri histori first listen play tri make other hear believ hear nobodi say anyth peopl appear listen other recogn heard think other might hear final everybodi listen music suddenli sound love poetri real nation legaci father child would call film dead nobodi dy spanish translat titl refus follow rule call dublin follow jame joyc titl nice 1900 irish film postcard', 'saw film chanc small box fantast chill cannot believ still wait 5 vote', 'small california town diablo plagu mysteri death sheriff robert lopez unearth ancient box legend box hold sixteenth centuri mexican demon name azar fbi agent gil vega sent investig murder join forc sheriff daughter dominiqu mari fight evil bloodthirsti demon legend diablo absolut garbag film lack scare gore act amateurish direct bad anim one aspect film enjoy big fan indi horror flick exampl love torch live feed bone sick neighborhood watch unfortun legend diablo huge misfir definit one avoid', 'good see vintag film buff correctli categor excel dvd releas excus elabor girli show kitti carlisl gertrud michael lead cast super decor girl includ ann sheridan luci ball beryl wallac gwenllian gill gladi young barbara fritchi wanda perri dorothi white carl brisson also hand lend strong voic cocktail two undoubtedli movi popular song heard le four time howev gertrud michael steal show rendit sweet marijauna strong perform hero reject girlfriend rest cast could done without jack oaki victor mclaglen altogeth good thing oaki role weak run gag cult icon tobi wing fact give idea far rest comedi indulg strain super dumb inspector mclaglen simpli cannot put hand killer even though would believ instanc happen person suspect director mitch leisen actual go great pain point killer even dumbest member cinema audienc give 
player concern close close', 'saw film via one actor agent sure conform great deal come film excel mostli kid actor ham embarrass case realli good term surreal thingi mention jingo well think film plain weird real weirdo film weirdo locat storylin weird stuff go whole time good weird oppos bad hard think movi like like car ate pari mayb like repuls actual think like hammer movi 60 certainli interest mind work behind jingo question also titl modern love anyon also jingo mean god forsaken talk australia hmm curiou', 'civil war mani case divid loyalti obvious mani occur border owen moor go join union armi shortli confeder soldier henri walthal separ regimen wander onto enemi properti desper water find suppli unionist young daughter gladi egan sit yanke soldier track littl gladi innoc help confeder hide later return kill father littl girl kind rememb sweet small stori director w griffith locat footag human lovingli display border state 6 13 10 w griffith henri walthal owen moor gladi egan' ] test_result = [1,0,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,0,1,1,1,0,1,0,1,1,0,1,1,0] test_func = lambda x: 'pos' if x==1 else 'neg' knn_c = knn.predict(tf.transform(test_array).toarray()) count_currect=0 for sentence,l,r in zip(test_array,knn_c,test_result): # print(sentence,': Random Forest=',test_func(l)) if l==r: count_currect +=1 print('KNN',count_currect/len(test_result)*100) ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Image/ReduceRegion.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/ReduceRegion.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/ReduceRegion.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). 
Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset # Image.reduceRegion example # # Computes a simple reduction over a region of an image. A reduction # is any process that takes an arbitrary number of inputs (such as # all the pixels of an image in a given region) and computes one or # more fixed outputs. The result is a dictionary that contains the # computed values, which in this example is the maximum pixel value # in the region. 
# This example shows how to print the resulting dictionary to the # console, which is useful when developing and debugging your # scripts, but in a larger workflow you might instead use the # Dicitionary.get() function to extract the values you need from the # dictionary for use as inputs to other functions. # The input image to reduce, in this case an SRTM elevation map. image = ee.Image('CGIAR/SRTM90_V4') # The region to reduce within. poly = ee.Geometry.Rectangle([-109.05, 41, -102.05, 37]) # Reduce the image within the given region, using a reducer that # computes the max pixel value. We also specify the spatial # resolution at which to perform the computation, in this case 200 # meters. max = image.reduceRegion({ 'reducer': ee.Reducer.max(), 'geometry': poly, 'scale': 200 }) # Print the result (a Dictionary) to the console. print(max) ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
# Automatic Learning of Key Phrases and Topics in Document Collections

## Part 4: Topic Model Summarization

### Overview

This notebook is Part 4 of 6 in a series providing a step-by-step description of how to process and analyze the contents of a large collection of text documents in an unsupervised manner. Using Python packages and custom code examples, we have implemented the basic framework that combines key phrase learning and latent topic modeling as described in the paper entitled ["Modeling Multiword Phrases with Constrained Phrases Tree for Improved Topic Modeling of Conversational Speech"](http://people.csail.mit.edu/hazen/publications/Hazen-SLT-2012.pdf), which was originally presented at the 2012 IEEE Workshop on Spoken Language Technology. Although the paper examines the use of the technology for analyzing human-to-human conversations, the techniques are quite general and can be applied to a wide range of natural language data including news stories, legal documents, research publications, social media forum discussions, customer feedback forms, product reviews, and many more.

Part 4 of the series shows how to summarize the contents of the document collection based on a trained LDA topic model. The summarization is applied to an LDA topic model learned in Part 3.

> **NOTE:** If you have retrained your own LDA model, you may not get the same topic model we are showing in this notebook. For demonstration purposes, all files used in this notebook can be downloaded via the links below. You can download all files to the `AZUREML_NATIVE_SHARE_DIRECTORY` folder and you will then have exactly the same results in this notebook.
| File Name | Link | |-----------|------| | `CongressionalDocsLDA.pickle` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocsLDA.pickle | | `CongressionalDocsLDA.pickle.expElogbeta.npy` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocsLDA.pickle.expElogbeta.npy | | `CongressionalDocsLDA.pickle.id2word` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocsLDA.pickle.id2word | | `CongressionalDocsLDA.pickle.state` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocsLDA.pickle.state | | `CongressionalDocsLDA.pickle.state.sstats.npy` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocsLDA.pickle.state.sstats.npy | | `CongressionalDocTopicLM.npy` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocTopicLM.npy | | `CongressionalDocTopicProbs.npy` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocTopicProbs.npy | | `CongressionalDocTopicSummaries.tsv` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/CongressionalDocTopicSummaries.tsv | | `Vocab2SurfaceFormMapping.tsv` | https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/Vocab2SurfaceFormMapping.tsv | Need to set the `saveFile` flag to `True` in case you do not want to download those pre-trained files, and want to re-run everything. ``` saveFile = True ``` ### Download Data Files (optional) You can download all those data files by executing the code in the cells below. 
``` import urllib.request import os def download_file_from_blob(filename): shared_path = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] save_path = os.path.join(shared_path, filename) if not os.path.exists(save_path): # Base URL for anonymous read access to Blob Storage container STORAGE_CONTAINER = 'https://bostondata.blob.core.windows.net/scenario-document-collection-analysis/' url = STORAGE_CONTAINER + filename urllib.request.urlretrieve(url, save_path) print("Downloaded file: %s" % filename) else: print("File \"%s\" already existed" % filename) download_file_from_blob('CongressionalDocsLDA.pickle') download_file_from_blob('CongressionalDocsLDA.pickle.expElogbeta.npy') download_file_from_blob('CongressionalDocsLDA.pickle.id2word') download_file_from_blob('CongressionalDocsLDA.pickle.state') download_file_from_blob('CongressionalDocsLDA.pickle.state.sstats.npy') download_file_from_blob('CongressionalDocTopicLM.npy') download_file_from_blob('CongressionalDocTopicProbs.npy') download_file_from_blob('CongressionalDocTopicSummaries.tsv') download_file_from_blob('Vocab2SurfaceFormMapping.tsv') # Set the saveFile flag to False since you have already downloaded those files saveFile = False ``` ### Import Relevant Python Packages Most significantly, Part 3 relies on the use of the [Gensim Python library](http://radimrehurek.com/gensim/) for generating a sparse bag-of-words representation of each document and then training a [Latent Dirichlet Allocation (LDA)](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) model on the data. LDA produces a collection of latent topics learned in a completely unsupervised fashion from the text data. Each document can then be represented with a distribution of the learned topics. 
``` import numpy import pandas import re import math import os import warnings warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim') import gensim from gensim import corpora from gensim import models from operator import itemgetter from collections import namedtuple import time import gc import sys import multiprocessing import matplotlib matplotlib.use('Agg') from azureml.logging import get_azureml_logger aml_logger = get_azureml_logger() # logger writes to AMLWorkbench runtime view aml_logger.log('amlrealworld.document-collection-analysis.notebook4', 'true') ``` ### Load the Trained LDA Model Learned in Part 3 > **NOTE** The data file is saved under the folder defined by environment variable `AZUREML_NATIVE_SHARE_DIRECTORY` in notebook 1. If you have changed it to `../Data`, please also do the change here. ``` # Load pretrained LDA topic model ldaFile = os.path.join(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "CongressionalDocsLDA.pickle") lda = gensim.models.ldamodel.LdaModel.load(ldaFile) # Get the mapping from token ID to token string id2token = lda.id2word print(id2token[1]) ``` ### Load the Mapping of Lower-Cased Vocabulary Items to Their Most Common Surface Form ``` # Load surface form mappings here fp = open(os.path.join(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "Vocab2SurfaceFormMapping.tsv"), encoding='utf-8') vocabToSurfaceFormHash = {} # Each line in the file has two tab separated fields; # the first is the vocabulary item used during modeling # and the second is its most common surface form in the # original data for stringIn in fp.readlines(): fields = stringIn.strip().split("\t") if len(fields) != 2: print ("Warning: Bad line in surface form mapping file: %s" % stringIn) elif fields[0] == "" or fields[1] == "": print ("Warning: Bad line in surface form mapping file: %s" % stringIn) else: vocabToSurfaceFormHash[fields[0]] = fields[1] fp.close() def CreateTermIDToSurfaceFormMapping(id2token, token2surfaceform): 
termIDToSurfaceFormMap = [] for i in range(0, len(id2token)): if id2token[i] in token2surfaceform: termIDToSurfaceFormMap.append(token2surfaceform[id2token[i]]) return termIDToSurfaceFormMap; termIDToSurfaceFormMap = CreateTermIDToSurfaceFormMapping(id2token, vocabToSurfaceFormHash); # print out the modeled token form and the best matching surface for the token with the index value of 18 i = 18 print('Term index:', i) print('Modeled form:', id2token[i]) print('Surface form:', termIDToSurfaceFormMap[i]) ``` ### Use the Build-in <i> print_topics </i> Method to Summarize a Random Sample of 10 Topics ``` numTopics = lda.num_topics print ("Number of topics:", numTopics) lda.print_topics(10) ``` ### Use Word Cloud to Visualize a Topic ``` import matplotlib.pyplot as plt from wordcloud import WordCloud def _terms_to_counts(terms, multiplier=1000): return ' '.join([' '.join(int(multiplier * x[1]) * [x[0]]) for x in terms]) def visualizeTopic(lda, topicID=0, topn=500, multiplier=1000): terms = [] tmp = lda.show_topic(topicID, topn) for term in tmp: terms.append(term) # If the version of wordcloud is higher than 1.3, then you will need to set 'collocations' to False. # Otherwise there will be word duplicates in the figure. try: wordcloud = WordCloud(max_words=10000, collocations=False).generate(_terms_to_counts(terms, multiplier)) except: wordcloud = WordCloud(max_words=10000).generate(_terms_to_counts(terms, multiplier)) fig = plt.figure(figsize=(12, 16)) plt.imshow(wordcloud) plt.axis("off") plt.title("Topic %d" % topicID) plt.show() %matplotlib inline ``` Visualize topic \#38 using Word Cloud. This topic seems to be related to national security. ``` visualizeTopic(lda, topicID=38, topn=1000) ``` Visualize topic \#168 using Word Cloud. This topic is mainly related to health care. 
``` visualizeTopic(lda, topicID=168, topn=1000) ``` ### Generate Various Required Probability Distributions #### Load the Document Probability Score P(topic|doc) Computed by the LDA Model from File In this section, each document from the corpus is passed into the LDA model which then infers the topic distribution for each document. The topic distributions are collected into a single numpy array. ``` docTopicProbsFile = os.path.join(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "CongressionalDocTopicProbs.npy") # docTopicProbs[docID,TopicID] --> P(topic|doc) docTopicProbs = numpy.load(docTopicProbsFile) # The docTopicProbs shape should be (# of docs, # of topics) docTopicProbs.shape ``` #### Compute the Global Topic Likelihood Scores P(topic) ``` # Computing the global topic likelihoods by aggregating topic probabilities over all documents # topicProbs[topicID] --> P(topic) def ComputeTopicProbs(docTopicProbs): topicProbs = docTopicProbs.sum(axis=0) topicProbs = topicProbs/sum(topicProbs) return topicProbs topicProbs = ComputeTopicProbs(docTopicProbs) ``` #### Convert the Topic Language Model Information P(term|topic) from the LDA Model into a NumPy Representation ``` def ExtractTopicLMMatrix(lda): # Initialize the matrix docTopicProbs = numpy.zeros((lda.num_topics,lda.num_terms)) for topicID in range(0,lda.num_topics): termProbsList = lda.get_topic_terms(topicID,lda.num_terms) for termProb in termProbsList: docTopicProbs[topicID,termProb[0]]=termProb[1] return docTopicProbs # topicTermProbs[topicID,termID] --> P(term|topic) topicTermProbs = ExtractTopicLMMatrix(lda) # Set saveFile flag to true if you want to save the Topic LMs for a newly trained LDA model to file if saveFile: numpy.save(os.path.join(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "CongressionalDocTopicLM.npy"), topicTermProbs) ``` #### Compute P(topic,term), P(term), and P(topic|term) ``` # Compute the joint likelihoods of topics and terms # jointTopicTermProbs[topicID,termID] --> P(topic,term) = 
P(term|topic)*P(topic) jointTopicTermProbs = numpy.diag(topicProbs).dot(topicTermProbs) # termProbs[termID] --> P(term) termProbs = jointTopicTermProbs.sum(axis=0) # topicProbsPermTerm[topicID,termID] --> P(topic|term) topicProbsPerTerm = jointTopicTermProbs / termProbs ``` Print the most frequent words in the LDA vocabulary. Compare it to Cell 11 in Notebook 3, and you will find that the most frequent words in LDA vocabulary are NOT the same as the most frequent words in corpus vocabulary. This is due to the fact that the probability derived from the LDA model do not account for document length, and therefore words common in a shorter documents carry more weight in these distributions than words common in longer documents. ``` # Print most frequent words in LDA vocab mostFrequentTermIDs = (-termProbs).argsort() for i in range(0,25): print ("%d: %s --> %f" % (i+1, id2token[mostFrequentTermIDs[i]], termProbs[mostFrequentTermIDs[i]])) ``` #### Compute WPMI To determine which vocabulary terms are most representative of a topic, systems typically just choose a set of terms that are most likely for the topic, i.e., terms that maximize the language model expression <i>P(term|topic)</i> for the given topic. This approach is adequate for many data sets. However, for some data sets there may be common words in the corpus that are frequent terms within multiple topics, and hence not a distinguishing term for any of these topics. In this case, selecting words which have the largest weighted pointwise mutual information (WPMI) with a given topic is more appropriate. 
The expression for WPMI between a term and a topic is given as:
``` topicID = 38 highestWPMITermIDs = (-topicTermWPMI[topicID]).argsort() highestProbTermIDs = (-topicTermProbs[topicID]).argsort() print (" WPMI Prob") for i in range(0,15): print ("%2d: %35s ---> %8.6f %35s ---> %8.6f" % (i+1, termIDToSurfaceFormMap[highestWPMITermIDs[i]], topicTermWPMI[topicID,highestWPMITermIDs[i]], termIDToSurfaceFormMap[highestProbTermIDs[i]], topicTermProbs[topicID,highestProbTermIDs[i]])) def CreateTopicSummaries(topicTermScores, id2token, tokenid2surfaceform, maxStringLen): reIgnore = re.compile('^[a-z]\.$') reAcronym = re.compile('^[A-Z]+$') topicSummaries = [] for topicID in range(0,len(topicTermScores)): rankedTermIDs = (-topicTermScores[topicID]).argsort() maxNumTerms = len(rankedTermIDs) termIndex = 0 stop = 0 outputTokens = [] prevAcronyms = [] topicSummary = "" while not stop: # If we've run out of tokens then stop... if (termIndex>=maxNumTerms): stop=1 # ...otherwise consider adding next token to summary else: nextToken = id2token[rankedTermIDs[termIndex]] nextTokenOut = tokenid2surfaceform[rankedTermIDs[termIndex]] keepToken = 1 # Prepare to test current word as an acronym or a string that reduces to an acronym nextTokenIsAcronym = 0 nextTokenAbbrev = "" if reAcronym.match(nextTokenOut) != None: nextTokenIsAcronym = 1 else: subTokens = nextToken.split('_') if (len(subTokens)>1): for subToken in subTokens: nextTokenAbbrev += subToken[0] # See if we should ignore this token because it matches the regex for tokens to ignore if ( reIgnore.match(nextToken) != None ): keepToken = 0; # Otherwise see if we should ignore this token because # it is a close match to a previously selected token elif len(outputTokens) > 0: for prevToken in outputTokens: # Ignore token if it is a substring of a previous token if nextToken in prevToken: keepToken = 0 # Ignore token if it is a superstring of a previous token elif prevToken in nextToken: keepToken = 0 # Ignore token if it is an acronym of a previous token elif nextTokenIsAcronym: subTokens = 
prevToken.split('_') if (len(subTokens)>1): prevTokenAbbrev = "" for subToken in subTokens: prevTokenAbbrev += subToken[0] if prevTokenAbbrev == nextToken: keepToken = 0 for prevAcronym in prevAcronyms: # Ignore token if it is the long form of an earlier acronym if nextTokenAbbrev == prevAcronym: keepToken = 0 # Add tokens to the summary for this topic if keepToken: # Always add at least one token to the summary if len(topicSummary) == 0 or ( len(topicSummary) + len(nextTokenOut) + 1 < maxStringLen): if len(topicSummary) == 0: topicSummary = nextTokenOut else: topicSummary += ", " + nextTokenOut outputTokens.append(nextToken) if nextTokenIsAcronym: prevAcronyms.append(nextToken) # If we didn't add the previous word and we're within 10 characters of # the max string length then we'll just stop here elif maxStringLen - len(topicSummary) < 10 : stop = 1 # Otherwise if the current token is too long, but we still have more than # 10 characters of space left we'll just skip this one and add the next token # one if it's short enough termIndex += 1 topicSummaries.append(topicSummary) return topicSummaries topicSummaries = CreateTopicSummaries(topicTermWPMI, id2token, termIDToSurfaceFormMap, 85) # Rank the topics by their prominence score in the corpus # The topic score combines the total weight of each a topic in the corpus # with a topic document purity score for topic # Topics with topicScore > 1 are generally very strong topics topicScore = (numTopics * topicProbs) * (2 * topicPurity) topicRanking = (-topicScore).argsort() ``` ### Print Out Topical Summary of the Entire Corpus ``` print ("Rank ID Score Prob Purity Summary") for i in range(0, numTopics): topicID = topicRanking[i] print (" %3d %3d %6.3f (%5.3f, %4.3f) %s" % (i+1, topicID, topicScore[topicID], 100*topicProbs[topicID], topicPurity[topicID], topicSummaries[topicID])) ``` ### Save LDA Topic Summaries ``` # If you want to save out the summaries to file makes saveFile flag True if saveFile: fp = 
open(os.path.join(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "CongressionalDocTopicSummaries.tsv"), "w") i = 0 fp.write("TopicID\tTopicSummary\n") for line in topicSummaries: fp.write("%d\t%s\n" % (i, line)) i += 1 fp.close() ``` ### Next The topic model summization step is finished. The next will be topic modeling analysis which will be in the fifth notebook of the series: [`5_Topic_Model_Analysis.ipynb`](5_Topic_Model_Analysis.ipynb).
github_jupyter
# Sparsity in activations and gradients for VGG16 ``` % matplotlib inline import os import numpy as np import tensorflow as tf import matplotlib.pyplot as plt log_path = '/home/rbodo/.snntoolbox/data/imagenet/vgg16_trained/logs' log_files = os.listdir(log_path) batch_size = 8 plotproperties = {'font.size': 10, 'axes.titlesize': 'large', 'axes.labelsize': 'large', 'xtick.labelsize': 'large', 'xtick.major.size': 2, 'xtick.minor.size': 1, 'ytick.labelsize': 'large', 'ytick.major.size': 2, 'ytick.minor.size': 1, 'legend.fontsize': 'large', 'figure.figsize': (3, 2), 'savefig.dpi': 300, 'savefig.format': 'jpg'} import matplotlib as mpl mpl.rcParams.update(plotproperties) def get_sparsity(hist): i_min = np.nonzero(np.array(hist.bucket_limit) > 0)[0][0] return hist.bucket[i_min] / hist.num data = tf.train.summary_iterator(os.path.join(log_path, log_files[0])) for _ in range(4): e = data.__next__() grad = e.summary.value[113] v = grad.histo grad.tag plt.scatter(v.bucket_limit, v.bucket) plt.xlabel('Gradient value') plt.ylabel('# occurrences') plt.savefig(os.path.join(log_path, 'gradient_distribution16'), bbox_inches='tight') plt.scatter(v.bucket_limit, np.cumsum(v.bucket)) pdf = np.diff(np.cumsum(v.bucket))/np.ediff1d(v.bucket_limit) plt.plot(pdf) idx = np.argmax(pdf) print(v.bucket_limit[idx-5:idx+5]) print(v.bucket[idx-5:idx+5]) pdf[idx]/np.sum(pdf) gradients_e[grad.tag] activations_e[grad.tag] data = tf.train.summary_iterator(os.path.join(log_path, log_files[0])) activations = {} gradients = {} gradient_sparsity = {} for e in data: for v in e.summary.value: if 'out' in v.tag: label = v.tag if not label in activations.keys(): activations[label] = [] activations[label].append(get_sparsity(v.histo)) elif '_grad_sparsity' in v.tag: label = v.tag if not label in gradient_sparsity.keys(): gradient_sparsity[label] = [] gradient_sparsity[label].append(v.simple_value) elif 'grad' in v.tag: label = v.tag if not label in gradients.keys(): gradients[label] = [] 
gradients[label].append(get_sparsity(v.histo)) num_epochs = int(len(list(activations.values())[0]) / batch_size / 4) activations_e = {} for key, value in activations.items(): activations_e[key] = [value[4*batch_size*(i+1)-1] for i in range(num_epochs)] gradients_e = {} for key, value in gradients.items(): gradients_e[key] = [value[4*batch_size*(i+1)-1] for i in range(num_epochs)] gradient_sparsity_e = {} for key, value in gradient_sparsity.items(): gradient_sparsity_e[key] = [value[4*batch_size*(i+1)-1] for i in range(num_epochs)] def plot_sparsity(data_dict, title): for label, value in data_dict.items(): plt.plot(np.array(value) * 100, '.', label=label[:-4]) #plt.legend(loc='right') #plt.title('Sparsity in activations of VGG16') plt.xlabel('Epoch') plt.ylabel('Sparsity [%]') plt.ylim(0, 100) plt.savefig(os.path.join(log_path, title), bbox_inches='tight') plot_sparsity(gradients_e, 'gradient_sparsity') plot_sparsity(activations_e, 'activation_sparsity') g = gradient_sparsity['fc2/kernel_0_grad_sparsity'] plt.plot(g) [g[32*i + 31] for i in range(6)] ```
github_jupyter
## Regular Gridded Data Structures / ndarrays in Python for Engineers and Geoscientists ### Michael Pyrcz, Associate Professor, University of Texas at Austin #### Contacts: [Twitter/@GeostatsGuy](https://twitter.com/geostatsguy) | [GitHub/GeostatsGuy](https://github.com/GeostatsGuy) | [www.michaelpyrcz.com](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) This is a tutorial for / demonstration of **Regular Gridded Data Structures in Python**. In Python, a common tool for dealing with Regular Gridded Data Structures is the *ndarray* from the **NumPy Python package** (by Jim Hugunin et al.). This tutorial includes the methods and operations that would commonly be required for Engineers and Geoscientists working with Regularly Gridded Data Structures for the purpose of: 1. Data Checking and Cleaning 2. Data Mining / Inferential Data Analysis 3. Predictive Modeling for Data Analytics, Geostatistics and Machine Learning. ##### Regular Data Structures In Python we will commonly store our data in two formats, tables and arrays. For sample data with typically multiple features $1,\ldots,m$ over $1,\ldots,n$ samples we will work with tables. For exhaustive 2D maps and 3D models (usually representing a single feature) on a regular grid over $[1,\ldots,n_{1}], [1,\ldots,n_{2}],\ldots,[1,\ldots,n_{ndim}]$, where $n_{dim}$ is the number of dimensions, we will work with arrays. Of course, it is always possible to add another dimension to our array to include multiple features, $1,\ldots,m$, over all locations. In geostatistical workflows the tables are typically sample data from wells and drill holes and the grids are the interpolated or simulated models or secondary data from sources such as seismic inversion. The NumPy package provides a convenient *ndarray* object for working with regularly gridded data. 
In the following tutorial we will focus on practical methods with *ndarray*s. There is another section available on Tabular Data Structures that focuses on DataFrames at https://github.com/GeostatsGuy/PythonNumericalDemos/blob/master/PythonDataBasics_DataFrame.ipynb. #### Project Goal Learn the basics for working with Regular Gridded Data Structures in Python to build practical subsurfrace modeling and machine learning workflows. #### Caveats I included methods that I have found useful for building my geo-engineering workflows for subsurface modeling. I think they should be accessible to most geoscientists and engineers. Certainly, there are more advanced, more compact, more efficient methods to accomplish the same tasks. I tried to keep the methods simple. I appreciate feedback and I will use it to improve this tutorial periodically. #### Load the required libraries The following code loads the required libraries. ``` import os # set current working directory import numpy as np # ndarrays import matplotlib.pyplot as plt # plotting from scipy import stats # summary stats ``` If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs. #### Declare functions These are the functions we have included here: 1. GSLIB2ndarray - load GSLIB Geo-EAS format regular grid data 1D or 2D to NumPy *ndarray* 2. ndarray2GSLIB - write NumPy array to GSLIB Geo-EAS format regular grid data 1D or 2D 3. 
pixelplt - plot 2D NumPy arrays with same parameters as GSLIB's pixelplt I include and demonstrate the GSLIB Geo-EAS file read and write functions, because (1) *ndarray* read and write member functions are convenience functions that are limited and (2) for geostatistical modeling it is conveneint to read and write from Geo-EAS the format used in GSLIB by Deutsch and Journel (1998). Also, I included a function that reimpliments the 2D array plotting program 'pixelplt' from GSLIB. The inputs are simple and the method is consistent with GSLIB, and by using it we postpone having to learn the MatPlotLib package for plotting. Warning, there has been no attempt to make these functions robust in the precense of bad inputs. If you get a crazy error check the inputs. Are the arrays the correct dimension? Is the parameter order mixed up? Make sure the inputs are consistent with the descriptions in this document. ``` # utility to convert 1D or 2D numpy ndarray to a GSLIB Geo-EAS file for use with GSLIB methods def ndarray2GSLIB(array,data_file,col_name): file_out = open(data_file, "w") file_out.write(data_file + '\n') file_out.write('1 \n') file_out.write(col_name + '\n') if array.ndim == 2: ny = (array.shape[0]) nx = (array.shape[1]) ncol = 1 for iy in range(0, ny): for ix in range(0, nx): file_out.write(str(array[ny-1-iy,ix])+ '\n') elif array.ndim == 1: nx = len(array) for ix in range(0, nx): file_out.write(str(array[ix])+ '\n') else: Print("Error: must use a 2D array") file_out.close() return file_out.close() # utility to convert GSLIB Geo-EAS files to a 1D or 2D numpy ndarray for use with Python methods def GSLIB2ndarray(data_file,kcol,nx,ny): colArray = [] if ny > 1: array = np.ndarray(shape=(ny,nx),dtype=float,order='F') else: array = np.zeros(nx) with open(data_file) as myfile: # read first two lines head = [next(myfile) for x in range(2)] line2 = head[1].split() ncol = int(line2[0]) # get the number of columns for icol in range(0, ncol): # read over the column names 
head = [next(myfile) for x in range(1)] if icol == kcol: col_name = head[0].split()[0] if ny > 1: for iy in range(0,ny): for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ny-1-iy][ix] = head[0].split()[kcol] else: for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ix] = head[0].split()[kcol] return array,col_name # pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods (commented out image file creation) def pixelplt(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) plt.figure(figsize=(8,6)) im = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels=np.linspace(vmin,vmax,100)) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im,orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) # plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return im ``` #### Set the working directory I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). Also, in this case make sure to place the required (see below) data file in this directory. When we are done with this tutorial we will write our new dataset back to this directory. ``` os.chdir("c:/PGE383") # set the working directory ``` #### Loading and Writing Let's load the 2D porosity map from the provide binary file. This file was created with the NumPy *ndarray* member function 'tofile'. Note: this and the read from file member function, *fromfile*, are convenience functions. They do not store any information about the array. So when we read our 100 x 100 array this results in a 10,000 1D array. Let's try for ourselves. 
We can read the binary to an array like this: ``` porosity_map = np.fromfile('porosity_truth_map.dat') ``` Next, let's look at the shape member: ``` porosity_map.shape ``` Confirmed, the shape is (10000,), a 10,000 node 1D array. Given we know it is actually a 100x100 array, we can use the *ndarray* member function *reshape* to correct this. Note, you get an error if the sizes are inconsistent, $\prod^{i} n_{i} \neq n_{1D}$ where $n_{i}$ is the number of nodes for axis $i$ and $n_{1D}$ is the number of nodes in the 1D vector that was read in. We reshape the array to 100x100, print the results and then get the 'ndarray' member 'shape' elements 0 and 1 to confirm the $n_{1} = n_{2} = 100$. ``` porosity_map = np.reshape(porosity_map,[100,100]) # reshape the array to 100 x 100 print(porosity_map.shape) ny = porosity_map.shape[0] # get the array nx nx = porosity_map.shape[1] # get the array ny print('Our 2D array has number of x cells = ' + str(nx) + ', and y cells = ' + str(ny) + '.' ) ``` Let's close the loop and write out the array and read it back in, to demonstrat the *ndarray* writing member function *tofile*. ``` porosity_map.tofile("porosity_test.dat") # save our 2D array to a 1D binary file porosity_test = np.fromfile('porosity_test.dat') # read the 1D binary back to a 1D array check = np.array_equal(porosity_map.flatten(),porosity_test) # check if the read in array is the same as flatten orig. print('The array we wrote out and read back in are the same, we closed the loop,' + str(check) + '.') ``` It worked! We used the NumPy function 'array_equal' to test if the arrays are the same. Did you notice I added the *flatten* member function? This caused the 100x100 'porosity_map' array to be passed to the *array_equal* as a 10,000 node 1D array, the same as 'porosity_test' array was loaded. We can write an array and read it back in and we get the same thing. Let's check out using .csv files to store a 2D ndarray. 
``` np.savetxt("porosity_map.csv", porosity_map, delimiter=",") ``` The 2D ndarray is saved with each line containing a row and each column delimited by a comma. In this format the 2D grid can be directly loaded into Excel. One can use conditional formatting to conduct a very quick check of the 'look' of the data. E.g. confirm that it is not upside down, scrambled etc. ``` porosity_map_test = np.loadtxt("porosity_map.csv", delimiter=",") # load the csv file back into a 2D ndarray test = np.array_equal(porosity_map,porosity_map_test) # check if the arrays are the same print(test) ``` OK, we confirmed that the save and reloaded 2D ndarray is the same as the original 2D ndarray. This save and load method works. Lets perform the same test for the included GeostatsPy functions to save and load gridded data in Geo-EAS format (this is the format used by GSLIB programs). ``` ndarray2GSLIB(porosity_map,"porosity_map_GSLIB.out","porosity") # save the gridded data to Geo-EAS format porosity_map_test2, col_name = GSLIB2ndarray("porosity_map_GSLIB.out",0,nx,ny) test = np.array_equal(porosity_map,porosity_map_test2) # check if the arrays are the same print(test) ``` OK, we confirmed that the GeostatsPy methods for saving and loading 2D gridded data work. #### Visualization Let's look at the dataset that we loaded. Instead of working with the MatPlotLib package directly (common data visualization package for Python) we will use the *pixelplt* reimplimentation from our set of functions from my effort to bring GSLIB to Python, the 'in-progress' GeostatsPy package. This function uses MatPlotLib with the function parameters to build a nice figure, so we can procastinate learning MatPlotLib for now! First let's set some parameters, including the spatial limits of the plot, the cell sizes in the plot and the min and max feature values and color map for the color bar. Our regular grid is 100 x 100 cells of 10 m cells (i.e. 
squares), 1,000 x 1,000 m in extents and we assume the origin, low left corder is at coordinate 0,0. Our porosity values are contained within the interval between 4 to 16%. ``` xmin = 0.0;xmax = 1000.0; ymin = 0.0; ymax = 1000.0; cell_size = 10.0; vmin = 4.0; vmax = 16.0; cmap = plt.cm.plasma ``` Now we are ready to plot the 2D array with the *pixpelplt* reimplementation from our GSLIB in Python. ``` pixelplt(porosity_map,xmin,xmax,ymin,ymax,cell_size,vmin,vmax,"Porosity Truth Map","X(m)","Y(M)","Porosity (%)",cmap,"Porosity_Map") ``` The NumPy package *ndarray* docs recommend that users consider making their own functions to read and write *ndarray*s from ASCII files. We have coded functions to do this using the GSLIB Geo-EAS format, to support geostatistical workflows that utilize GSLIB programs as part of the GeostatsPy package that we are developing. We included the read and write functions here for this tutorial. You can look at a truncated representation of the *ndarray* like this. Sometimes a good way to check data is to just look at it. ``` print(porosity_map) ``` You can see that the 2D array is actually an array of arrays, e.g. an array of $1,\ldots,n_{x}$ of arrays of $1,\ldots,n_{y}$. To show this we can include an index for x and we will get a slice for all values with equal $x$ index. Let's look at the the first slice of $y$ values with x index equal to zero. ``` porosity_map[0] ``` If we add another index we get a single node from the 2D array. Let's get the first and last values from this slice with $x$ index equal to zero. We will print them and you can confirm they are the first and last values from the output above. ``` print(porosity_map[0][0]) # get first and last value for ix = 0 slice print(porosity_map[0][99]) ``` Alternatively, you can use this notation to access a single cell in a *ndarray*. 
``` print(porosity_map[0,0]) # get first and last value for ix = 0 slice print(porosity_map[0,99]) ``` You could get access to a range of values of the array like this (see below). We get the results for *porosity_map* indices $ix = 0$ and $iy = 0,1,\ldots,9$. ``` print(porosity_map[0][0:10]) # get first 10 values for the ix = 0 slice ``` If you want to see the entire array without truncated representation then you change the print options threshold in NumPy to a *NaN* like this. Note, this is probably not a good idea if you are working with very large arrays. For this example you can literally look through 10,000 values! ``` np.set_printoptions(threshold=np.nan) # remove truncation from array visualization print(porosity_map) ``` #### Summary Statistics Let's try some summary statistics. Here's a convenient method from SciPy. Like many of the methods it anticipates a 1D array so we do a *flatten* on the 2D array to convert it to a 1D array before passing it. ``` stats = stats.describe(porosity_map.flatten()) # array summary statistics stats ``` We also have a variety of built in summary statistic calculations that we may apply on *ndarray*s. Note, these methods work directly with our 2D array; therefore, do not require flatening to a 1D array. ``` mean_por = porosity_map.mean() # array summary statistics stdev_por = porosity_map.std() min_por = porosity_map.min() max_por = porosity_map.max() print('Summary Statistics of Porosity \n Mean = ' + str(mean_por) + ', StDev = ' + str(stdev_por)) print(' Min = ' + str(min_por) + ', Max = ' + str(max_por)) ``` We can also do this with NumPy functions that work with arrays that calculate the previous summary statistics and more. 
``` mean_por = np.mean(porosity_map) # array summary statistics stdev_por = np.std(porosity_map) min_por = np.min(porosity_map) max_por = np.max(porosity_map) P10_por,P90_por = np.percentile(porosity_map,[0.10,0.90]) print('Summary Statistics of Porosity \n Mean = ' + str(mean_por) + ', StDev = ' + str(stdev_por)) print(' Min = ' + str(min_por) + ', Max = ' + str(max_por)) print(' P10 = ' + str(P10_por) + ', P90 = ' + str(P90_por)) ``` #### Checking and Manipulating We can read and write individual value of our array with indices $ix = 0,\ldots,nx-1$ and $iy = 0,\ldots,ny-1$. ``` local_por = porosity_map[0,0] # get porosity at location 0,0 print('Porosity at location 0,0 in our ndarray is ' + str(local_por) + '.') porosity_map[0,0] = 10.0000 # change the porosity value at location 0,0 print('Porosity at location 0,0 in our ndarray is now ' + str(porosity_map[0,0]) + '.') ``` We can also check for *NaN*s, invalid or missing values in our *ndarray*. ``` porosity_map[0,0] = np.nan print('Porosity at location 0,0 in our ndarray is now ' + str(porosity_map[0,0]) + '.') ``` We can check for any *NaN*'s in our array with the following code. First, let's add a couple more *NaN* values to make this example more interesting. ``` porosity_map[0,1] = np.nan # add another NaN porosity_map[2,1] = np.nan # add another NaN result = np.isnan(porosity_map).any() result ``` Ok, so now we kown that we have *NaN*'s in our array. This could cause issues with our calculations. We can get a list of indices with *NaN*'s in our *ndarray*. ``` nan_list = np.argwhere(np.isnan(porosity_map)) # get list of indices of array with NaNs print(nan_list) ``` We now have a list of the indices (0,0), (0,1) and (2,1) with *NaN*'s. This is exactly the array indices that we assigned to NaN. If you convert this list of indices by mapping them with *map* to *tuple*s and make that into a new list we get something we can use to directly interact with the *NaN*'s in our 2D *ndarray*. 
``` nan_list_tuple = list(map(tuple, nan_list)) # convert index list to tuple list print(nan_list_tuple) # check the tuple list print(porosity_map[nan_list_tuple[0]]) # get the values at the indices print(porosity_map[nan_list_tuple[1]]) print(porosity_map[nan_list_tuple[2]]) ``` Now that we have this list of array coordinate, list of tuples, we can use this to actually access those locations. Here we use those locations (there should be 3 *NaN*'s) to replace the missing values with very small porosity values (0.0001). ``` print('Value at the first NaN indices is ' + str(porosity_map[nan_list_tuple[0]]) + '.') # get value at first index porosity_map[nan_list_tuple[0]] = 0.001 # set the NaN's to a low porosity value porosity_map[nan_list_tuple[1]] = 0.001 porosity_map[nan_list_tuple[2]] = 0.001 print('Value at the first NaN indices after setting to 0.001 is ' + str(porosity_map[nan_list_tuple[0]]) + '.') ``` #### Making Arrays There are various methods to make *ndarray*s from scratch. In some cases, our arrays are small enough we can just write them like this. ``` my_array = np.array([[0,1,2],[4,5,6],[7,8,9]]) # make an ndarray by scratch print(my_array.shape) my_array ``` We now have a 3 x 3 *ndarray*. We can also use NumPy's *rand* to make an *ndarray* of any shape with random values between 0 and 1 and *zeros* to make an array of any shape with 0's. 
``` from scipy import stats # summary stats rand_array = np.random.rand(100,100) # make 100 x 100 node array with random values print('Shape of the random array = ' + str(rand_array.shape)) print(stats.describe(rand_array.flatten())) pixelplt(rand_array,xmin,xmax,ymin,ymax,cell_size,0,1,"Random Values","X(m)","Y(M)","Random",cmap,"random") zero_array = np.zeros((100,100)) # make 100 x 100 node array with zeros print('Shape of the zero array = ' + str(zero_array.shape)) print(stats.describe(zero_array.flatten())) pixelplt(zero_array,xmin,xmax,ymin,ymax,cell_size,-1,1,"Zeros","X(m)","Y(M)","Zeros",cmap,"zeros") ``` #### Operations We can search for values in our array with any criteria we like. In this example we identify all nodes with porosity values greater than 15%, the result of *porosity > 15.0* is a boolean array (true and false) with true when that criteria is met. We apply that to the *porosity_map* *ndarray* to return all node values with true in a new array. We can check the size of that array to get the total number of nodes with porosity values greater than 15. ``` greater_than = porosity_map[porosity_map > 15.0] # make boolean array and get values that meet criteria print(greater_than) print('There are ' + str(greater_than.size) + ' of a total of ' + str(porosity_map.flatten().size) + '.') ``` We can actually plot the boolean array (true = 1 and false = 0 numerically) to get a map of the nodes that meet the criteria. We do that below with porosity > 13% because it looks more interesting than only 25 nodes for the porosity > 15% case. ``` thresh_porosity_map = porosity_map > 13.0 pixelplt(thresh_porosity_map,xmin,xmax,ymin,ymax,cell_size,0,1,"Porosity > 13%","X(m)","Y(M)","Boolean",cmap,"threshold") ``` How would you get a list of the indices that meet the criteria in the *porosity map* array? We repeat the command to make a list of tuples with locations with porosity > 15%, *loc_hig_por*. Then we simply grab the ix and iy index values from this list. 
The list is set up like this, my_list[0 for ix, 1 for iy][1 to number of nodes] ``` loc_high_por = np.nonzero(porosity_map > 15) # get the indices with high porosity print('Loc #1, ix = ' + str(loc_high_por[1][0]) + ' and iy = ' + str(loc_high_por[0][0]) + '.') print(' With a value of ', str(porosity_map[loc_high_por[0][0],loc_high_por[1][0]]) + '.') print('Loc #2, ix = ' + str(loc_high_por[1][1]) + ' and iy = ' + str(loc_high_por[0][1]) + '.') print(' With a value of ', str(porosity_map[loc_high_por[0][1],loc_high_por[1][1]]) + '.') loc_high_por ``` Perhaps you want to do something more creative with your *ndarray*. The most flexible approach is to use a loop and iterate over the array. Let's add noise to our porosity map. To do this we take the previously calculated random array and center it (set the mean to 0.0 by subtracting the current mean), we will multiply it by a factor of 5 so that the result is more noticable and add it to the *porosity_map* array. ``` porosity_map_noise = np.zeros((100,100)) # use of loops to maniputale ndarrays for iy in range(ny): for ix in range(nx): porosity_map_noise[iy,ix] = porosity_map[iy,ix] + (rand_array[iy,ix]-0.5)*5 print(stats.describe(porosity_map_noise.flatten())) pixelplt(porosity_map_noise,xmin,xmax,ymin,ymax,cell_size,0,16,"Porosity With Noise","X(m)","Y(M)","Porosity (%)",cmap,"Residual") ``` We could have done the above without the loops, by using the simple statement below. We can use algebriac operators on *ndarray*s like this example below if the *ndarray* are all the same size. ``` porosity_map_noice2 = porosity_map + (rand_array-0.5) * 5 # using matrix algebra to repeat the previous looped method print(stats.describe(porosity_map_noise.flatten())) pixelplt(porosity_map_noise,xmin,xmax,ymin,ymax,cell_size,0,16,"Porosity With Noise","X(m)","Y(M)","Porosity (%)",cmap,"Residual2") ``` Let's write our new *ndarray* to a file for storage and to apply with other software such as GSLIB. 
``` ndarray2GSLIB(porosity_map_noise,"porosity_noise_GSLIB.dat","porosity_noise") # write out 2D array to a Geo-DAS ASCII file ``` #### More Exercises There are so many more exercises and tests that one could attempt to gain experience with the NumPy package, *ndarray* objects in Python. I'll end here for brevity, but I invite you to continue. Check out the docs at https://docs.scipy.org/doc/numpy-1.14.0/reference/arrays.ndarray.html. I'm always happy to discuss, *Michael* Michael Pyrcz, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin On twitter I'm the @GeostatsGuy.
github_jupyter
# Important Math Utilities for Principles of Planetary Climate This *notebook* shows how the tasks previously accomplished by *ClimateUtilities.py* and *ClimateGraphicsMPL.py* can be done with *pandas* and *scipy*. Compare this notebook to `old_ClimateUtilities\CoursewareGuide.ipynb` ``` import sys sys.path.append(r'D:\Users\thomas\Coding\Python\climate\python3\utilities') import numpy as np import matplotlib.pyplot as plt import pandas as pd ``` ### DataFrames `DataFrames` replace the `Curve` objects, as an all-purpose data object cooked up in order to make it easier to read, write, manipulate and plot tabular data. ``` df = pd.DataFrame() ``` Having created it, we can now put some data in it. ``` df['x'] = list(range(10)) ``` You refer to a column of data using its name, and the name of the column is treated as an "index" into the data. Here's a simple example, also illustrating that what is returned by the indexing operation is an array that you can do array arithmetic on: ``` print(df['x']) print(df['x']**2) ``` Now let's add a second data column, which we'll call `y` ``` yData = [2.*xx**2 for xx in range(10)] df['y'] = yData ``` If you ever need to know what the names of the columns in a `DataFrame` object are, just use the property `columns`: ``` df.columns ``` You can do arithmetic on columns, and even store the results in a new named column, which will be created automatically: ``` df['y'] = df['y'] + 1. df['z'] = 10.*df['x'] + df['y'] print(df['y']) print(df['z']) ``` You can save your data to a tab-delimited text file using the `to_csv(...)` method. (If you are new to *pandas*, also check out the other formats you can write to, by typing `df.to_+TAB`.) The argument of the method is the filename: ``` df.to_csv('out.txt', sep='\t', index=False) ``` You can read tabular data into a `DataFrame` object using the `read_csv(...)` function. (Again, there are many `pd.read_ ...` functions available.) 
Here we are reading our data back in again: ``` data = pd.read_csv('out.txt', sep='\t') print(data) ``` `read_csv` has a large number of options, which allow you to read in almost any kind of data. You can make a new DataFrame object consisting of a subset of the original data: ``` df_subset = df[['x','z']] print(df_subset) ``` ### Line plots with DataFrame objects ``` %matplotlib inline ``` And here is an example plotting the `DataFrame` `df` defined earlier ``` df.plot() ``` If you select a given axis for the abscissa, the labels are automatically generated. Otherwise, you can use standard *matplotlib* to format the plot. ``` df.plot('x', 'y') plt.title('Temperature vs time') plt.ylabel('Temperature [K]') ``` You can reverse axes, use log-scales, etc ``` df.plot('x', 'y', logy=True) ah = plt.gca() ah.invert_yaxis() ``` You can plot multiple lines on the same graph: ``` df.plot('x', ['y', 'z']) ``` ### Numerical Analysis #### Root finding by Newton's method Here is an example of creating a root finder object for the problem $ x^2 - a = 0$. ``` from scipy.optimize import newton def f(x): return x*x - 2 initial_guess = 1 m = newton(f, initial_guess) print(m) ``` Here's an example where the parameters of the function are passed as a list of two values ``` def f(x, n, a): return x**n - a initial_guess = 1 m = newton(f, initial_guess, args=(3, 2)) #n=3,a=2 print(m) ``` #### Quadrature Numerical quadrature is the process of evaluating a definite integral such as $$\int_a^b f(x) dx$$ There are many ways of numerically implementing quadrature. Trapezoidal rule is a simple one. The Romberg interpolation method gives a highly accurate approximation to a definite integral, if the function $f$ is reasonably smooth. (It can behave poorly if the function has discontinuities). 
Here is an example, where we evaluate the integral between $x=0$ and $x=1$: ``` from scipy.integrate import romberg def f(x): return x m = romberg(f, 0, 1) print(m) ``` Here's an example of a function that needs parameters specified. ``` def f(x, g, n, tau): return np.exp(-g(x)**n/tau) def g(x): return x**2 parameters = (g, 2, 2) m = romberg(f, 0, 1, args=parameters) print(m) ``` In a case like this, where you might not need the function `g` again, you could use a `lambda` function as the parameter argument and save several lines. Here we are doing the integral for the case $g(x) = x\sin(x)$ ``` m = romberg(f, 0, 1, args=(lambda x: x*np.sin(x),2.,2.)) print(m) ``` #### Interpolation There are several general interpolation facilities available in SciPy, for data in 1, 2, and higher dimensions: * A class representing an interpolant (interp1d) in 1-D, offering several interpolation methods. * Convenience function griddata offering a simple interface to interpolation in N dimensions (N = 1, 2, 3, 4, ...). Object-oriented interface for the underlying routines is also available. * Functions for 1- and 2-dimensional (smoothed) cubic-spline interpolation, based on the FORTRAN library FITPACK. There are both procedural and object-oriented interfaces for the FITPACK library. * Interpolation using Radial Basis Functions. ##### 1-D interpolation (interp1d) The interp1d class in scipy.interpolate is a convenient method to create a function based on fixed data points which can be evaluated anywhere within the domain defined by the given data using linear interpolation. An instance of this class is created by passing the 1-d vectors comprising the data. The instance of this class defines a `__call__` method and can therefore by treated like a function which interpolates between known data values to obtain unknown values (it also has a docstring for help). Behavior at the boundary can be specified at instantiation time. 
The following example demonstrates its use, for linear and cubic spline interpolation: ``` from scipy.interpolate import interp1d # Generate some data x = np.linspace(0, 10, num=11, endpoint=True) y = np.cos(-x**2/9.0) xnew = np.linspace(0, 10, num=41, endpoint=True) # Make a "linear" and a "cubic" interpolation object f = interp1d(x, y) f2 = interp1d(x, y, kind='cubic') # Calculate and plot the interpolated data plt.plot(x, y, 'o', label='data') plt.plot(xnew, f(xnew), '-', label='linear') plt.plot(xnew, f2(xnew),'--', label='cubic') plt.legend() plt.show() ``` #### Numerical integration of ordinary differential equations Many tasks require a numerical integration of ordinary differential equations of the form $$ \frac{dY}{dt} = F(t,Y) $$ $Y$ is the dependent variable snd $t$ is the independent variable. In your own application, these might have different names. This equation could represent a first order system in which the solution $Y(t)$ is just a real (or complex) valued function of $t$, but it could also represent a higher order system in which both $Y$ and the slope function $F$ are vectors (arrays). ``` from scipy.integrate import odeint def slope(x, t): #Function must have independent variable as an argument, even if not used return np.cos(t) yStart = 0 t = np.linspace(0, 4*np.pi, 101) y = odeint(slope, yStart, t) plt.plot(t,y) ``` Here's an example for a case where the slope function has parameters: ``` def slope(x, t, a, b): return -a* x**b * t xStart = 0.1 t = np.linspace(0,5, 101) a, b = 0.1, 2 # set the parameters x = odeint(slope,xStart,t, args=(a,b)) plt.plot(t, x) ``` Now we'll illustrate how to integrate a higher order system. 
As our example we'll use the system $$ \frac{dx}{dt} = - y, \qquad \frac{dy}{dt} = x $$ which will be re-cast in the vector form $$\frac{d}{dt} \left[ \begin{array}{c} x\\y \end{array} \right] = \left[ \begin{array}{c} -y\\x \end{array} \right] $$ Thus the slope function $F$ is the vector on the right hand side, and the "state variable" is the vector on the left hand side, which we'll call $S$. $F$ is a function of $S$. Here is how we implement the solution. `integrator` is used exactly the same way. All we need to do is to write the slope function so that it takes a `numpy` array as input and produces a `numpy` array as output. Note that the array returned must be a *newly created* array. The initial value for the dependent variable given to `integrator` must also be a `numpy` array. ``` def F(S, t): x,y = S return np.array([-y,x]) S_start = np.array([1.,0.]) t = np.linspace(0, 2*np.pi, 101) S = odeint(F, S_start,t) fig = plt.figure() ax = fig.add_subplot(111, aspect='equal') ax.plot(S[:,0], S[:,1]) ax.set_xlim(-1, 1) ax.set_ylim(-1,1) ``` And here we are doing the integration and plotting the results in the x-y plane. Did you expect to see a circle? I hope so!
github_jupyter
# Logistic Regression with a Neural Network mindset Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and will also hone your intuitions about deep learning. **Instructions:** - Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so. - Use `np.dot(X,Y)` to calculate dot products. **You will learn to:** - Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. ## Table of Contents - [1 - Packages](#1) - [2 - Overview of the Problem set](#2) - [Exercise 1](#ex-1) - [Exercise 2](#ex-2) - [3 - General Architecture of the learning algorithm](#3) - [4 - Building the parts of our algorithm](#4) - [4.1 - Helper functions](#4-1) - [Exercise 3 - sigmoid](#ex-3) - [4.2 - Initializing parameters](#4-2) - [Exercise 4 - initialize_with_zeros](#ex-4) - [4.3 - Forward and Backward propagation](#4-3) - [Exercise 5 - propagate](#ex-5) - [4.4 - Optimization](#4-4) - [Exercise 6 - optimize](#ex-6) - [Exercise 7 - predict](#ex-7) - [5 - Merge all functions into a model](#5) - [Exercise 8 - model](#ex-8) - [6 - Further analysis (optional/ungraded exercise)](#6) - [7 - Test with your own image (optional/ungraded exercise)](#7) <a name='1'></a> ## 1 - Packages ## First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](https://numpy.org/doc/1.20/) is the fundamental package for scientific computing with Python. - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file. - [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python. 
- [PIL](https://pillow.readthedocs.io/en/stable/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end. ``` import numpy as np import copy import matplotlib.pyplot as plt import h5py import scipy from PIL import Image from scipy import ndimage from lr_utils import load_dataset from public_tests import * %matplotlib inline %load_ext autoreload %autoreload 2 ``` <a name='2'></a> ## 2 - Overview of the Problem set ## **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px). You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat. Let's get more familiar with the dataset. Load the data by running the following code. ``` # Loading the data (cat/non-cat) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() ``` We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing). Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. ``` # Example of a picture index = 25 plt.imshow(train_set_x_orig[index]) print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.") ``` Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. 
If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. <a name='ex-1'></a> ### Exercise 1 Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image) Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`. ``` #(≈ 3 lines of code) # m_train = # m_test = # num_px = # YOUR CODE STARTS HERE m_train = train_set_y.shape[1] m_test = test_set_y.shape[1] num_px = train_set_x_orig.shape[1] # YOUR CODE ENDS HERE print ("Number of training examples: m_train = " + str(m_train)) print ("Number of testing examples: m_test = " + str(m_test)) print ("Height/Width of each image: num_px = " + str(num_px)) print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)") print ("train_set_x shape: " + str(train_set_x_orig.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x shape: " + str(test_set_x_orig.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) ``` **Expected Output for m_train, m_test and num_px**: <table style="width:15%"> <tr> <td> m_train </td> <td> 209 </td> </tr> <tr> <td>m_test</td> <td> 50 </td> </tr> <tr> <td>num_px</td> <td> 64 </td> </tr> </table> For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns. <a name='ex-2'></a> ### Exercise 2 Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1). 
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```python X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X ``` ``` # Reshape the training and test examples #(≈ 2 lines of code) # train_set_x_flatten = ... # test_set_x_flatten = ... # YOUR CODE STARTS HERE train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T # YOUR CODE ENDS HERE # Check that the first 10 pixels of the second image are in the correct place assert np.alltrue(train_set_x_flatten[0:10, 1] == [196, 192, 190, 193, 186, 182, 188, 179, 174, 213]), "Wrong solution. Use (X.shape[0], -1).T." assert np.alltrue(test_set_x_flatten[0:10, 1] == [115, 110, 111, 137, 129, 129, 155, 146, 145, 159]), "Wrong solution. Use (X.shape[0], -1).T." print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) ``` **Expected Output**: <table style="width:35%"> <tr> <td>train_set_x_flatten shape</td> <td> (12288, 209)</td> </tr> <tr> <td>train_set_y shape</td> <td>(1, 209)</td> </tr> <tr> <td>test_set_x_flatten shape</td> <td>(12288, 50)</td> </tr> <tr> <td>test_set_y shape</td> <td>(1, 50)</td> </tr> </table> To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255. One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. 
But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> Let's standardize our dataset. ``` train_set_x = train_set_x_flatten / 255. test_set_x = test_set_x_flatten / 255. ``` <font color='blue'> **What you need to remember:** Common steps for pre-processing a new dataset are: - Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...) - Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1) - "Standardize" the data <a name='3'></a> ## 3 - General Architecture of the learning algorithm ## It's time to design a simple algorithm to distinguish cat images from non-cat images. You will build a Logistic Regression, using a Neural Network mindset. 
The following Figure explains why **Logistic Regression is actually a very simple Neural Network!** <img src="images/LogReg_kiank.png" style="width:650px;height:400px;"> **Mathematical expression of the algorithm**: For one example $x^{(i)}$: $$z^{(i)} = w^T x^{(i)} + b \tag{1}$$ $$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$ The cost is then computed by summing over all training examples: $$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$ **Key steps**: In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude <a name='4'></a> ## 4 - Building the parts of our algorithm ## The main steps for building a Neural Network are: 1. Define the model structure (such as number of input features) 2. Initialize the model's parameters 3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent) You often build 1-3 separately and integrate them into one function we call `model()`. <a name='4-1'></a> ### 4.1 - Helper functions <a name='ex-3'></a> ### Exercise 3 - sigmoid Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid(z) = \frac{1}{1 + e^{-z}}$ for $z = w^T x + b$ to make predictions. Use np.exp(). ``` # GRADED FUNCTION: sigmoid def sigmoid(z): """ Compute the sigmoid of z Arguments: z -- A scalar or numpy array of any size. Return: s -- sigmoid(z) """ #(≈ 1 line of code) # s = ... 
# YOUR CODE STARTS HERE s = 1 / (1 + np.exp(-z)) # YOUR CODE ENDS HERE return s print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2])))) sigmoid_test(sigmoid) x = np.array([0.5, 0, 2.0]) output = sigmoid(x) print(output) ``` <a name='4-2'></a> ### 4.2 - Initializing parameters <a name='ex-4'></a> ### Exercise 4 - initialize_with_zeros Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation. ``` # GRADED FUNCTION: initialize_with_zeros def initialize_with_zeros(dim): """ This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0. Argument: dim -- size of the w vector we want (or number of parameters in this case) Returns: w -- initialized vector of shape (dim, 1) b -- initialized scalar (corresponds to the bias) of type float """ # (≈ 2 lines of code) # w = ... # b = ... # YOUR CODE STARTS HERE w = np.zeros(shape=(dim, 1)) b = 0.0 # YOUR CODE ENDS HERE assert(w.shape == (dim, 1)) assert(isinstance(b, float) or isinstance(b, int)) return w, b dim = 2 w, b = initialize_with_zeros(dim) assert type(b) == float print ("w = " + str(w)) print ("b = " + str(b)) initialize_with_zeros_test(initialize_with_zeros) ``` <a name='4-3'></a> ### 4.3 - Forward and Backward propagation Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters. <a name='ex-5'></a> ### Exercise 5 - propagate Implement a function `propagate()` that computes the cost function and its gradient. 
**Hints**: Forward Propagation: - You get X - You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$ - You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}(y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)}))$ Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$ $$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$ ``` # GRADED FUNCTION: propagate def propagate(w, b, X, Y): """ Implement the cost function and its gradient for the propagation explained above Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of size (num_px * num_px * 3, number of examples) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples) Return: cost -- negative log-likelihood cost for logistic regression dw -- gradient of the loss with respect to w, thus same shape as w db -- gradient of the loss with respect to b, thus same shape as b Tips: - Write your code step by step for the propagation. np.log(), np.dot() """ m = X.shape[1] # FORWARD PROPAGATION (FROM X TO COST) ### START CODE HERE ### (≈ 2 lines of code) # compute activation A = sigmoid(np.dot(X.T,w) + b) cost = -(1/m) * np.sum(Y.T * np.log(A) + (1 - Y.T) * (np.log(1-A)) ) # compute cost ### END CODE HERE ### # BACKWARD PROPAGATION (TO FIND GRAD) ### START CODE HERE ### (≈ 2 lines of code) dw = (1/m) * np.dot(X,(A-Y.T)) db = (1/m) * np.sum(A-Y.T) ### END CODE HERE ### assert(dw.shape == w.shape) assert(db.dtype == float) cost = np.squeeze(cost) assert(cost.shape == ()) grads = {"dw": dw, "db": db} return grads, cost w = np.array([[1.], [2.]]) b = 2. 
X =np.array([[1., 2., -1.], [3., 4., -3.2]]) Y = np.array([[1, 1, 0]]) grads, cost = propagate(w, b, X, Y) assert type(grads["dw"]) == np.ndarray assert grads["dw"].shape == (2, 1) assert type(grads["db"]) == np.float64 print ("dw = " + str(grads["dw"])) print ("db = " + str(grads["db"])) print ("cost = " + str(cost)) propagate_test(propagate) ``` **Expected output** ``` dw = [[0.99845601] [2.39507239]] db = 0.001455578136784208 cost = 5.801545319394553 ``` <a name='4-4'></a> ### 4.4 - Optimization - You have initialized your parameters. - You are also able to compute a cost function and its gradient. - Now, you want to update the parameters using gradient descent. <a name='ex-6'></a> ### Exercise 6 - optimize Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate. ``` # GRADED FUNCTION: optimize def optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False): """ This function optimizes w and b by running a gradient descent algorithm Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of shape (num_px * num_px * 3, number of examples) Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples) num_iterations -- number of iterations of the optimization loop learning_rate -- learning rate of the gradient descent update rule print_cost -- True to print the loss every 100 steps Returns: params -- dictionary containing the weights w and bias b grads -- dictionary containing the gradients of the weights and bias with respect to the cost function costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve. 
Tips: You basically need to write down two steps and iterate through them: 1) Calculate the cost and the gradient for the current parameters. Use propagate(). 2) Update the parameters using gradient descent rule for w and b. """ w = copy.deepcopy(w) b = copy.deepcopy(b) costs = [] for i in range(num_iterations): # (≈ 1 lines of code) # Cost and gradient calculation # grads, cost = ... # YOUR CODE STARTS HERE # YOUR CODE ENDS HERE grads, cost = propagate(w, b, X, Y) ### END CODE HERE ### # Retrieve derivatives from grads dw = grads["dw"] db = grads["db"] # update rule (≈ 2 lines of code) ### START CODE HERE ### w = w - learning_rate * dw b = b - learning_rate * db # YOUR CODE ENDS HERE # Record the costs if i % 100 == 0: costs.append(cost) # Print the cost every 100 training iterations if print_cost: print ("Cost after iteration %i: %f" %(i, cost)) params = {"w": w, "b": b} grads = {"dw": dw, "db": db} return params, grads, costs params, grads, costs = optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False) print ("w = " + str(params["w"])) print ("b = " + str(params["b"])) print ("dw = " + str(grads["dw"])) print ("db = " + str(grads["db"])) print("Costs = " + str(costs)) optimize_test(optimize) ``` <a name='ex-7'></a> ### Exercise 7 - predict The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions: 1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$ 2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). 
``` # GRADED FUNCTION: predict def predict(w, b, X): ''' Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b) Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of size (num_px * num_px * 3, number of examples) Returns: Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X ''' m = X.shape[1] Y_prediction = np.zeros((1, m)) w = w.reshape(X.shape[0], 1) # Compute vector "A" predicting the probabilities of a cat being present in the picture #(≈ 1 line of code) # A = ... # YOUR CODE STARTS HERE A = sigmoid(np.dot(X.T,w) + b) # YOUR CODE ENDS HERE for i in range(A.shape[0]): # Convert probabilities A[i,0] to actual predictions Y_prediction[0,i] # YOUR CODE STARTS HERE if(A[i,0] > 0.5): Y_prediction[0,i] = 1 else: Y_prediction[0,i] = 0 # YOUR CODE ENDS HERE return Y_prediction w = np.array([[0.1124579], [0.23106775]]) b = -0.3 X = np.array([[1., -1.1, -3.2],[1.2, 2., 0.1]]) print ("predictions = " + str(predict(w, b, X))) predict_test(predict) ``` <font color='blue'> **What to remember:** You've implemented several functions that: - Initialize (w,b) - Optimize the loss iteratively to learn parameters (w,b): - Computing the cost and its gradient - Updating the parameters using gradient descent - Use the learned (w,b) to predict the labels for a given set of examples <a name='5'></a> ## 5 - Merge all functions into a model ## You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order. <a name='ex-8'></a> ### Exercise 8 - model Implement the model function. 
Use the following notation: - Y_prediction_test for your predictions on the test set - Y_prediction_train for your predictions on the train set - parameters, grads, costs for the outputs of optimize() ``` # GRADED FUNCTION: model def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False): """ Builds the logistic regression model by calling the function you've implemented previously Arguments: X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train) Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train) X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test) Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations -- hyperparameter representing the number of iterations to optimize the parameters learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize() print_cost -- Set to True to print the cost every 100 iterations Returns: d -- dictionary containing information about the model. """ # (≈ 1 line of code) # initialize parameters with zeros # w, b = ... #(≈ 1 line of code) # Gradient descent # params, grads, costs = ... # Retrieve parameters w and b from dictionary "params" # w = ... # b = ... # Predict test/train set examples (≈ 2 lines of code) # Y_prediction_test = ... # Y_prediction_train = ... 
# YOUR CODE STARTS HERE # initialize parameters with zeros (≈ 1 line of code) w, b = initialize_with_zeros(X_train.shape[0]) print(w.shape) # Gradient descent (≈ 1 line of code) parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost) # Retrieve parameters w and b from dictionary "parameters" w = parameters["w"] b = parameters["b"] # Predict test/train set examples (≈ 2 lines of code) Y_prediction_test = predict(w,b, X_test) Y_prediction_train = predict(w,b, X_train) # YOUR CODE ENDS HERE # Print train/test Errors if print_cost: print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "w" : w, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} return d from public_tests import * model_test(model) ``` If you pass all the tests, run the following cell to train your model. ``` logistic_regression_model = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True) ``` **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week! Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set. ``` # Example of a picture that was wrongly classified. 
index = 1 plt.imshow(test_set_x[:, index].reshape((num_px, num_px, 3))) print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(logistic_regression_model['Y_prediction_test'][0,index])].decode("utf-8") + "\" picture.") ``` Let's also plot the cost function and the gradients. ``` # Plot learning curve (with costs) costs = np.squeeze(logistic_regression_model['costs']) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(logistic_regression_model["learning_rate"])) plt.show() ``` **Interpretation**: You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. <a name='6'></a> ## 6 - Further analysis (optional/ungraded exercise) ## Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. #### Choice of learning rate #### **Reminder**: In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate. Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. 
``` learning_rates = [0.01, 0.001, 0.0001] models = {} for lr in learning_rates: print ("Training a model with learning rate: " + str(lr)) models[str(lr)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=lr, print_cost=False) print ('\n' + "-------------------------------------------------------" + '\n') for lr in learning_rates: plt.plot(np.squeeze(models[str(lr)]["costs"]), label=str(models[str(lr)]["learning_rate"])) plt.ylabel('cost') plt.xlabel('iterations (hundreds)') legend = plt.legend(loc='upper center', shadow=True) frame = legend.get_frame() frame.set_facecolor('0.90') plt.show() ``` **Interpretation**: - Different learning rates give different costs and thus different predictions results. - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy. - In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) <a name='7'></a> ## 7 - Test with your own image (optional/ungraded exercise) ## Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)! ``` # change this to the name of your image file my_image = "my_image.jpg" # We preprocess the image to fit your algorithm. 
fname = "images/" + my_image image = np.array(Image.open(fname).resize((num_px, num_px))) plt.imshow(image) image = image / 255. image = image.reshape((1, num_px * num_px * 3)).T my_predicted_image = predict(logistic_regression_model["w"], logistic_regression_model["b"], image) print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") ``` <font color='blue'> **What to remember from this assignment:** 1. Preprocessing the dataset is important. 2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model(). 3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course! Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include: - Play with the learning rate and the number of iterations - Try different initialization methods and compare the results - Test other preprocessings (center the data, or divide each row by its standard deviation) Bibliography: - http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/ - https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
github_jupyter
``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` Below is code with a link to a happy or sad dataset which contains 80 images, 40 happy and 40 sad. Create a convolutional neural network that trains to 100% accuracy on these images, which cancels training upon hitting training accuracy of >.999 Hint -- it will work best with 3 convolutional layers. ``` import tensorflow as tf import os import zipfile DESIRED_ACCURACY = 0.999 !wget --no-check-certificate \ "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/happy-or-sad.zip" \ -O "/tmp/happy-or-sad.zip" zip_ref = zipfile.ZipFile("/tmp/happy-or-sad.zip", 'r') zip_ref.extractall("/tmp/h-or-s") zip_ref.close() class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, log={}): if (log.get('accuracy')>DESIRED_ACCURACY): print("\nAccuracy achieved {:.1f}% so cancelling training.".format(DESIRED_ACCURACY)) self.model.stop_training = True callbacks = myCallback() # This Code Block should Define and Compile the Model model = tf.keras.models.Sequential([ # Your Code Here tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) 
model.summary() from tensorflow.keras.optimizers import RMSprop model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy']) # This code block should create an instance of an ImageDataGenerator called train_datagen # And a train_generator by calling train_datagen.flow_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale=1./255) # Your Code Here train_generator = train_datagen.flow_from_directory( '/tmp/h-or-s', target_size=(150, 150), batch_size=40, class_mode='binary') # Your Code Here) # Expected output: 'Found 80 images belonging to 2 classes' # This code block should call model.fit and train for # a number of epochs. history = model.fit_generator(train_generator, steps_per_epoch=2, epochs=15, verbose=1, callbacks=[callbacks]) # Your Code Here) # Expected output: "Reached 99.9% accuracy so cancelling training!"" ```
github_jupyter
Running requires having the data on Google Drive or uploading the data to Colab via the left menu and setting the data path correctly. To use TPU, set "tpu" to True and hardware accelerator to "TPU" from Edit -> Notebook Settings ``` seed = 42 import pandas as pd import numpy as np import random np.random.seed(seed) random.seed(seed) tpu = False from google.colab import drive drive.mount('/gdrive') data_path = '/gdrive/My Drive/Colab Notebooks/' import os import tensorflow as tf # This address identifies the TPU we'll use when configuring TensorFlow. if tpu: TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR'] tf.logging.set_verbosity(tf.logging.INFO) def tpu_compatibilitate(model): if tpu: return tf.contrib.tpu.keras_to_tpu_model( model, strategy=tf.contrib.tpu.TPUDistributionStrategy( tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER))) else: return model ``` Read data ``` data = pd.read_csv(data_path + 'EEG_data.csv') data.columns ``` The labels for confusion are the same for each video by subject ``` for subjId in set(data.SubjectID): for vidId in set(data.VideoID): assert data.query('SubjectID == {} and VideoID == {}' .format(subjId, vidId))['user-definedlabeln'].mean() in (0.0, 1.0) ``` Load subtitle vectors ``` import numpy as np """ from csv """ #vid_dfs = pd.concat([pd.read_csv(notebook_path + 'subtitles/vid_{}_elmo_embedded_subs.csv'.format(i)) # for i in range(10)], ignore_index=True # ).sort_values(['SubjectID', 'VideoID']).reset_index(drop=True) #vec_cols = [str(x) for x in range(1024)] #sub_vecs = vid_dfs[vec_cols].values.astype('float32') """ save/load from npy """ sub_vec_path = data_path + 'subtitle_vecs.npy' #np.save(sub_vec_path, sub_vecs) sub_vecs = np.load(sub_vec_path) sub_vec_dim = sub_vecs.shape[1] """ Make a dataset of original data combined with sub vecs """ dataset = np.hstack((data.values.astype('float32'), sub_vecs)) ``` PCA to reduce subtitle vector dimensions. 
Speeds up training and also has the potential to increase performance ``` from sklearn.decomposition import PCA """ PCA to reduce dimension of the word average vectors (might give better results) """ sub_vec_dim = 12 pca = PCA(n_components=sub_vec_dim) pcad_sub_vecs = pca.fit_transform(sub_vecs) dataset = np.hstack((data.values.astype('float32'), pcad_sub_vecs)) dataset.shape ``` Preprocessing as is done in https://github.com/mehmani/DNNs-for-EEG-Signals/blob/master/DNNforEEFSignals.ipynb ``` import numpy as np from sklearn.preprocessing import MinMaxScaler def NormSignal(S, I): #normalize features S=S.reshape(-1, 1) if I not in [0, 1, 13, 14]: scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(S) scaled = scaled else: scaled = S return scaled.reshape(-1).tolist() NormDataG = np.array([NormSignal(dataset[:,i], i) for i in range(dataset.shape[1])]).T print(NormDataG.shape) ``` Additional metrics besides accuracy to have more information on model performance ``` from sklearn.metrics import roc_auc_score, f1_score from tensorflow.keras.callbacks import Callback class f1_auc_callback(Callback): def __init__(self, X_test, y_test, f1s, roc_aucs): self.X_test = X_test self.y_test = y_test.flatten() self.f1s = f1s self.roc_aucs = roc_aucs def on_train_end(self, epoch, logs={}): y_pred = self.model.predict_proba(self.X_test, verbose=0).flatten() roc_test = roc_auc_score(self.y_test, y_pred) f1_test = f1_score(self.y_test, np.round(y_pred)) #print('\r rocauc %s f1 %s' % (str(roc_test), str(f1_test)), end=10*' ' + '\n') self.f1s.append(roc_test) self.roc_aucs.append(f1_test) return ``` Model from the paper "confused or not confused" ``` from tensorflow.keras.datasets import imdb from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.python.keras.layers import BatchNormalization from tensorflow.python.keras.layers import Input, LSTM, Bidirectional, Dense, Flatten, Dropout, TimeDistributed, 
Conv2D, MaxPooling2D, Masking def get_model_timedist(intervals, n_dim=11): model = Sequential([ #Masking(mask_value=0, input_shape=(A, n_dim)), # Masking does not help for some reason (should help with padded data?) BatchNormalization(input_shape=(intervals, n_dim), axis=2), # New version of keras doesn't support "mode" attribute, which was used in the original code (mode=0) Bidirectional(LSTM(50, return_sequences=False, activation='selu'), input_shape=(intervals, n_dim)), Dense(intervals, activation='sigmoid') ]) model.compile(loss='binary_crossentropy', optimizer='RMSprop', metrics=['binary_accuracy']) return model, (-1, intervals, n_dim) ``` Model from the paper "confused or not confused" with binary output per data point ``` def get_model(intervals, n_dim=11): model = Sequential([ #Masking(mask_value=0, input_shape=(A, n_dim)), # Masking does not help for some reason (should help with padded data?) BatchNormalization(input_shape=(intervals, n_dim), axis=2), Bidirectional(LSTM(50, return_sequences=False, activation='selu'), input_shape=(intervals, n_dim)), #Dropout(0.2), Dense(1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc']) return model, (-1, intervals, n_dim) ``` Model from https://github.com/mehmani/DNNs-for-EEG-Signals ``` def get_mehmani_model(intervals, n_dim=11): model = Sequential() model.add(TimeDistributed(Conv2D(20, (5,5), activation='relu'), input_shape=(1, intervals, n_dim, 1))) model.add(Dropout(0.5)) model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2)))) model.add(TimeDistributed(Flatten())) model.add(LSTM(10, return_sequences=True)) model.add(Dropout(0.5)) model.add(Bidirectional(LSTM(20, return_sequences=True))) model.add(LSTM(10)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc']) return model, (-1, 1, intervals, n_dim, 1) ``` Functions for making the data amount of intervals the same for each data point ``` def 
min_max_rows_per_subject_vid(X): VideoID = list(set(X[:,1])) SubjectID = list(set(X[:,0])) max_intervals = 0 # length of signal min_intervals = len(X) for subId in SubjectID: for vidId in VideoID: X_tmp=X[(X[:, 0] == subId) & (X[:, 1] == vidId)] max_intervals = max(len(X_tmp), max_intervals) min_intervals = min(len(X_tmp), min_intervals) print(max_intervals) print(min_intervals) assert max_intervals == 144 return min_intervals, max_intervals min_intervals, max_intervals = min_max_rows_per_subject_vid(dataset) def zero_pad_data(X, max_intervals, y_col): # Manual Padding to fixed size: X_pad = None VideoID = list(set(X[:,1])) SubjectID = list(set(X[:,0])) for subId in SubjectID: for vidId in VideoID: X_sv = X[(X[:,0]==subId) & (X[:,1]==vidId)] pad_len = max_intervals - X_sv.shape[0] z = np.zeros((pad_len, X_sv.shape[1]), dtype=X_sv.dtype) z[:,0] = X_sv[:,0][pad_len] z[:,1] = X_sv[:,1][pad_len] z[:,y_col] = X_sv[:,y_col][pad_len] X_sv_pad = np.concatenate((X_sv, z), axis=0) X_sv_pad = X_sv_pad.reshape(1, max_intervals, -1) X_pad = X_sv_pad if X_pad is None else np.vstack((X_pad,X_sv_pad)) return X_pad def truncate_data(X, min_intervals, y_col): X_trunc = None VideoID = list(set(X[:,1])) SubjectID = list(set(X[:,0])) for vidId in VideoID: for subId in SubjectID: X_sv = X[(X[:,0]==subId) & (X[:,1]==vidId)] trunc_len = min_intervals X_sv_trunc = X_sv[0:trunc_len].reshape(1, min_intervals, -1) X_trunc = X_sv_trunc if X_trunc is None else np.vstack((X_trunc, X_sv_trunc)) return X_trunc ``` Define target variable and which variables to use for training ``` y_col = 13 # The student's confusion column orig_train_data_cols = list(range(2,14)) vector_cols = list(np.arange(sub_vec_dim) + 15) train_cols = orig_train_data_cols n_dim = len(train_cols) ``` Cross-validation ``` from time import time def train_eval_model(model, X_train, y_train, X_test, y_test, train_cols, intervals, epochs=20, batch_size=20, verbose=1): start = time() f1s = [] roc_aucs = [] history = 
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_test, y_test), verbose=verbose, shuffle=True, callbacks=[f1_auc_callback(X_test, y_test, f1s, roc_aucs)]) print('model trained in {:.3f} seconds'.format(time() - start)) loss, acc = model.evaluate(X_test, y_test, verbose=0) return (acc, np.nanmean(f1s), np.nanmean(roc_aucs), history) def cross_validate(model, X_shape, data, y_col, train_cols, intervals, even_data, n_test=2, time_distributed=False, verbose=1, epochs=50, batch_size=20): """ even_data: either truncate_data or zero_pad_data to make number of intervals even for each data point """ results = [] initial_weights = model.get_weights() for i in range(0, 10, n_test): model.set_weights(initial_weights) # Reset weights to forget training done on current iteration's test data data_train = even_data(data[np.in1d(data[:,0], (i, i+1), invert=True)], intervals, y_col=y_col) data_test = even_data(data[np.in1d(data[:,0], (i, i+1))], intervals, y_col=y_col) X_train = data_train[:, :, train_cols] y_train = data_train[:, :, y_col] X_test = data_test[:, :, train_cols] y_test = data_test[:, :, y_col] X_train = X_train.reshape(X_shape) X_test = X_test.reshape(X_shape) if not time_distributed: y_train = y_train.reshape(-1, intervals).mean(axis=1) y_test = y_test.reshape(-1, intervals).mean(axis=1) if verbose > 1: print('Xtrain shape', X_train.shape) print('Xtest shape', X_test.shape) print('ytrain shape', y_train.shape) print('ytest shape', y_test.shape) start = time() print('{}-fold cross validation, iteration {}' .format(int(10/n_test), len(results) +1), end=' ') acc, f1, roc_auc, history = train_eval_model(model, X_train, y_train, X_test, y_test, train_cols, intervals, epochs=epochs, batch_size=batch_size, verbose=verbose) results.append({'acc': acc, 'F1': f1, 'ROC-AUC': roc_auc}) if verbose > 0: print('current cross-validation mean accuracy: {:.3f}, F1: {:.3f}, and ROC-AUC: {:.3f}'.format( *[np.mean([r[key] for r in results]) for key in 
results[0].keys()])) print('iteration time: {:.3f} seconds'.format(time() - start)) return results """ Suppress warnings """ import warnings def warn(*args, **kwargs): pass old_warn = warnings.warn warnings.warn = warn """ Test the model from the paper "Confused or not confused" for pre-defined labels """ y_col = 13 train_cols = orig_train_data_cols[:-1] n_dim = len(train_cols) model, input_shape = get_model_timedist(min_intervals, n_dim) tpu_model = tpu_compatibilitate(model) results = cross_validate(model=tpu_model, X_shape=input_shape, data=dataset, y_col=y_col, train_cols=train_cols, time_distributed=True, intervals=min_intervals, even_data=truncate_data, n_test=2, verbose=0, epochs=2, batch_size=20) print(*results, sep='\n') print('cross-validation mean accuracy: {:.3f}, f1: {:.3f}, and roc-auc: {:.3f}'.format( *[np.mean([r[key] for r in results]) for key in results[0].keys()])) def test_model(name, get_model, y_col, data, time_dist=False, use_sub_vecs=False, truncate=True, epochs=2): print('Cross-validation: {}\n'.format(name)) train_cols = list(range(2,y_col)) if use_sub_vecs: train_cols += vector_cols n_dim = len(train_cols) intervals = min_intervals if truncate else max_intervals even_data = truncate_data if truncate else zero_pad_data start = time() model, input_shape = get_model(intervals, n_dim) tpu_model = tpu_compatibilitate(model) results = cross_validate(model=tpu_model, X_shape=input_shape, data=data, y_col=y_col, train_cols=train_cols, time_distributed=time_dist, intervals=intervals, even_data=even_data, n_test=2, verbose=0, epochs=epochs, batch_size=20) print(*results, sep='\n') result_summary = '\nCross-validation: {} mean accuracy: {:.3f}, f1: {:.3f}, and roc-auc: {:.3f}'.format(name, *[np.mean([r[key] for r in results]) for key in results[0].keys()]) print(result_summary) print('cross validation total time: {:.4f} min\n'.format((time() - start) / 60)) return result_summary result_dict = {} for modelname, model in {'lstm50': get_model, 
'confused': get_model_timedist, 'mehmani': get_mehmani_model}.items(): for label_col in (13, 14): for truncate in (True, False): for use_sub_vecs in (False, True): label = 'student-defined' if label_col == 14 else 'pre-defined' data_evening = 'truncated' if truncate else 'zero padded' sub_vec_usage = 'with subtitle vectors' if use_sub_vecs else 'without subtitle vectors' name = '{} model for {} labels with {} data and {}'.format( modelname, label, data_evening, sub_vec_usage) time_dist = modelname == 'confused' data = dataset if modelname != 'mehmani' else NormDataG result_summary = test_model(name, model, label_col, data, time_dist, use_sub_vecs, truncate, epochs=40) result_dict[name] = result_summary print(*result_dict.values(), sep='\n') ```
github_jupyter
``` %matplotlib inline from importlib import reload from __future__ import print_function, division import numpy as np import matplotlib.pyplot as plt import pandas as pd from itertools import product from functools import partial from sklearn.datasets import make_sparse_spd_matrix from sklearn.datasets.base import Bunch from sklearn.utils.extmath import squared_norm from sklearn.covariance import GraphLasso, empirical_covariance from sklearn.datasets.base import Bunch from sklearn.model_selection import GridSearchCV, ShuffleSplit from sklearn.gaussian_process import kernels from regain import prox; reload(prox) from regain.covariance import time_graphical_lasso_; reload(time_graphical_lasso_); import time from regain.bayesian import wishart_process_; reload(wishart_process_) from regain.bayesian import stats; reload(stats) import regain.generalized_linear_model.ising; reload(regain.generalized_linear_model.ising) import regain.generalized_linear_model.time; reload(regain.generalized_linear_model.time) from regain.generalized_linear_model.time import TemporalIsingModel from regain import utils; reload(utils); ``` # ISING ``` #devo generare dati temporali.. 
nel dubbio potrei farli tutti uguali, per cominciare #import regain.datasets.time; reload(regain.datasets.time) #import regain._datasets.ising; reload(regain._datasets.ising) import regain.datasets; reload(regain.datasets) from regain.datasets import make_dataset samples, thetas = make_dataset( n_samples=100, n_dim_obs=10, T=10, update_theta='l1', distribution='ising', change=2) import pickle as pkl with open("../../../../projects/time_exponential_family/data/data_ising_temporal_test_10nodes.pkl", "wb") as f: pkl.dump([samples, thetas], f) import regain.generalized_linear_model.ising; reload(regain.generalized_linear_model.ising) from regain.generalized_linear_model.ising import Ising_GLM_GM import regain.model_selection.stability_optimization; reload(regain.model_selection.stability_optimization) from regain.model_selection.stability_optimization import GraphicalModelStabilitySelection ising_mdl = Ising_GLM_GM(alpha=0.32, mode='symmetric_fbs', verbose=0) params = [] estimators = [] for i in range(10): cv = GraphicalModelStabilitySelection(ising_mdl, param_grid={'alpha':np.linspace(0.1, 0.5, 50)}, sampling_size=80, n_repetitions=20) cv.fit(samples[i]) params.append(cv.best_params_['alpha']) estimators.append(cv.best_estimator_) from sklearn.model_selection import ShuffleSplit sss = ShuffleSplit(n_splits=10) precisions = [] for train, test in sss.split(samples[0]): precs= [] for i, e in enumerate(estimators): e.fit(samples[i][train, :]) precs.append(e.precision_) precisions.append(np.array(precs)) precisions[0].shape plot_roc_curves(np.array(thetas), [np.abs(p) for p in precisions]) ising_mdl.get_params() utils.structure_error(np.array(thetas), np.array(precisions), no_diagonal=True) import pickle as pkl with open("../../../../projects/time_exponential_family/results_ising/results_single_10nodes.pkl", "wb") as f: pkl.dump([precisions, params], f) import regain.model_selection.stability_optimization; reload(regain.model_selection.stability_optimization) from 
regain.model_selection.stability_optimization import GraphicalModelStabilitySelection cv = GraphicalModelStabilitySelection(mdl, param_grid={'alpha':np.linspace(0.1, 0.5, 50)}, sampling_size=800, plot=True, n_repetitions=20) cv.fit(X, y) #import regain.generalized_linear_model.time; reload(regain.generalized_linear_model.time) from regain.generalized_temporal_linear_model.ising import TemporalIsingModel from sklearn.gaussian_process import kernels kernel = kernels.RBF(length_scale=10)(np.arange(10)[:, None]) #kernel = np.ones((10,10))*1 #np.fill_diagonal(kernel, 1) mdl = TemporalIsingModel( alpha=np.mean(params), psi='l1', assume_centered=True, rtol=1e-5, tol=1e-4, max_iter=300, rho=1., kernel=kernels.RBF, ker_param='auto', init='zeros', verbose=0) X = np.vstack(samples) y = np.array([np.ones(x.shape[0]) * i for i, x in enumerate(samples)]).flatten().astype(int) #base_results(mdl, X, y, K, thetas, ells, search_spaces=None) mdl.fit(X, y) from sklearn.model_selection import StratifiedShuffleSplit sss = StratifiedShuffleSplit(n_splits=10) precisions_time = [] for train, test in sss.split(X, y): mdl.fit(X[train, :], y[train]) precisions_time.append(mdl.precision_) utils.structure_error(np.array(thetas), mdl.precision_, no_diagonal=True) import regain.plotting.results; reload(regain.plotting.results) from regain.plotting.results import plot_roc_curves, plot_precision_recall_comparison, plot_roc_comparison plot_roc_comparison(np.abs(np.array(thetas)), {'TIGM':[np.abs(p) for p in precisions_time], 'IGM':[np.abs(p) for p in precisions]}) import regain.plotting.results; reload(regain.plotting.results) from regain.plotting.results import plot_roc_curves, plot_precision_recall_comparison, plot_roc_comparison plot_precision_recall_comparison(np.abs(np.array(thetas)), {'TIGM':[np.abs(p) for p in precisions_time], 'IGM':[np.abs(p) for p in precisions]}) import regain.utils; reload(regain.utils) from regain.utils import structure_error, mean_structure_error 
mean_structure_error(np.array(thetas), [np.abs(p) for p in precisions]) mean_structure_error(np.array(thetas), [np.abs(p) for p in precisions_time]) ```
github_jupyter
All the files we saved will find [here](https://drive.google.com/drive/folders/18NjSyrEIbtnT5V56xoC3odkS0TEQuFaw?usp=sharing) . ## Install Neccessary Packages ``` !pip install haversine !pip install nvector import pandas as pd import numpy as np from math import radians, degrees, pi, asin, sin, cos, atan2 ``` ## Data Loading ``` !rm -r /content/sample_data/ !mkdir train valid test !curl -o /content/train/SHL-2021-Train.zip http://www.shl-dataset.org/wp-content/uploads/SHLChallenge2021/SHL-2021-Train.zip !curl -o /content/valid/SHL-2021-Validate_11052021.zip http://www.shl-dataset.org/wp-content/uploads/SHLChallenge2021/SHL-2021-Validate_11052021.zip !curl -o /content/test/SHL-2021-Test.zip http://www.shl-dataset.org/wp-content/uploads/SHLChallenge2021/SHL-2021-Test.zip import zipfile import os from tqdm import notebook as tqdm def un_zipFiles(path): files=os.listdir(path) for file in tqdm.tqdm(files): if file.endswith('.zip'): filePath=path+'/'+file zip_file = zipfile.ZipFile(filePath) for names in zip_file.namelist(): zip_file.extract(names,path) zip_file.close() un_zipFiles('/content/train') un_zipFiles('/content/valid') un_zipFiles('/content/test') !rm /content/train/SHL-2021-Train.zip /content/valid/SHL-2021-Validate_11052021.zip /content/test/SHL-2021-Test.zip ``` ## Preprocessing ``` # Round datetime to nearst integer def convert_date(time): ms=time.strftime('.%f') if float(ms)>0.5: time=time+pd.Timedelta('1s') time1=time.strftime('%Y-%m-%d %H:%M:%S') return time1 ``` ### Location ``` def read_train_data(path): location=pd.read_csv(path,delimiter='\t',header=None,names=['location']) location=location['location'].str.split(expand=True) location.columns=['Timestamp','Ignore2','Ignore3','accuracy(m)','latitude','longitude','altitude'] location.drop(['Ignore2','Ignore3'],axis=1,inplace=True) location['Timestamp']=pd.to_datetime(location.Timestamp, unit='ms') location['Timestamp']=pd.to_datetime(location['Timestamp'].apply(lambda x: convert_date(x))) 
location[location.columns[1:]] = location[location.columns[1:]].astype(np.number) return location # Train train_location = read_train_data('/content/train/Location.txt') train_location.head() # Valid val_location = read_train_data('/content/valid/Location.txt') val_location.head() # Test test_location = read_train_data('/content/test/Location.txt') test_location.head() ``` ### Cell ``` def label(df): condition = [df['cellType'] == 'LTE', df['cellType'] == 'GSM', df['cellType'] == 'WCDMA'] value = [0,1,2] df['cellType'] = np.select(condition,value) return df def read_cell_data(path): import gc df = pd.read_csv(path,delimiter='\t',header=None,names=['col']) df = df['col'].str.split(expand=True, n=14) df.drop([1, 2, df.columns[-1]],axis=1,inplace=True) temp = df[df.loc[:, 4] == 'GSM'].drop([5, 6, 7, 8, 9, 13], axis=1) temp.columns = [col for col in range(len(temp.columns))] temp1 = df[df.loc[:, 4] != 'GSM'].drop([5, 6, 7, 8, 9, 10], axis=1) temp1.columns = [col for col in range(len(temp1.columns))] df = pd.concat([temp, temp1]) df.columns = ['Timestamp', 'noOfCell', 'cellType', 'asuLevel', 'dBm', 'signalLevel'] df['Timestamp'] = pd.to_datetime(df.Timestamp, unit='ms') df['Timestamp'] = pd.to_datetime(df['Timestamp'].apply(lambda x: convert_date(x))) df.drop_duplicates(subset=['Timestamp'], keep='first', inplace=True) df[['asuLevel', 'dBm', 'noOfCell', 'signalLevel']] = df[['asuLevel', 'dBm', 'noOfCell', 'signalLevel']].astype(np.float64) df = label(df) del temp, temp1 gc.collect() return df.sort_index() # Train train_cell = read_cell_data('/content/train/Cells.txt') train_cell.head() # Valid val_cell = read_cell_data('/content/valid/Cells.txt') val_cell.head() # Test test_cell = read_cell_data('/content/test/Cells.txt') test_cell.head() ``` ### GPS ``` def read_gps_data(path): from tqdm import notebook as tqdm with open(path, 'r') as f: lines = f.readlines() times = [] no_of_satelites = [] max_snrs = [] nearest_satelites = [] for line in tqdm.tqdm(lines): li = 
line.strip().split() max_snr = 0 nearest_satelite = 0 for m in range(int(li[-1])): snr = float(li[m*4 + 4]) if snr > max_snr: max_snr = snr nearest_satelite = int(li[m*4 + 3]) times.append(li[0]) no_of_satelites.append(int(li[-1])) max_snrs.append(max_snr) nearest_satelites.append(nearest_satelite) print('------------- Converting to Dataframe -------------') gps = pd.DataFrame([times, no_of_satelites, max_snrs, nearest_satelites]).T gps.columns = ['Timestamp', 'no_of_satelite', 'max_snr', 'nearest_satelite'] gps['Timestamp'] = pd.to_datetime(gps.Timestamp, unit='ms') gps['Timestamp'] = pd.to_datetime(gps['Timestamp'].apply(lambda x: convert_date(x))) gps[['no_of_satelite', 'nearest_satelite', 'max_snr']] = gps[['no_of_satelite', 'nearest_satelite', 'max_snr']].astype(np.float64) return gps # Train train_gps = read_gps_data('/content/train/GPS.txt') train_gps.head() # Valid val_gps = read_gps_data('/content/valid/GPS.txt') val_gps.head() # Test test_gps = read_gps_data('/content/test/GPS.txt') test_gps.head() ``` ### WiFi ``` def get_first_col(listx): return listx[0] def get_4th(listx): return int(listx[3]) def get_rssi_list(l): networks = int(l[3]) rssi_list = list() for i in range(networks): rssi_list.append(l[i*5+6]) return rssi_list #the highest value of rssi(in dB) was selected as best rssi def get_best_rssi(l): networks = int(l[3]) rssi_list = list() best_rssi = 0 for i in range(networks): rssi_list.append(l[i*5+6]) if i == 0: best_rssi = int(l[i*5+6]) elif best_rssi< int(l[i*5+6]): best_rssi = int(l[i*5+6]) return best_rssi def level_rssi(l): rssi_lessThan50 = 0 rssi_50_60 = 0 rssi_60_70 = 0 rssi_70_80 = 0 rssi_80_90 = 0 rssi_zero = 0 rssi_greaterThan90 = 0 for items in l: if int(items)==0: rssi_zero+=1 elif int(items)>-50: rssi_lessThan50 += 1 elif int(items)>-60: rssi_50_60 += 1 elif int(items)>-70: rssi_60_70 += 1 elif int(items)>-80: rssi_70_80 += 1 elif int(items)>-90: rssi_80_90 += 1 else : rssi_greaterThan90 += 1 return [rssi_lessThan50, rssi_50_60, 
rssi_60_70, rssi_70_80, rssi_80_90, rssi_greaterThan90, rssi_zero] def get_freq(l): networks = int(l[3]) freq_list = list() for i in range(networks): freq_list.append(round(int(l[i*5+7])/1000)) return [freq_list.count(2),freq_list.count(5)] def read_wifi_data(path): import gc wifi = pd.read_csv(path, delimiter='\t', header=None, names=['wifi_data']) wifi["wifi_data"] = wifi["wifi_data"].str.split(';') wifi['Timestamp'] = wifi["wifi_data"].apply(lambda x: get_first_col(x)) wifi['Timestamp'] = pd.to_datetime(wifi.Timestamp, unit='ms') wifi['Timestamp'] = pd.to_datetime(wifi['Timestamp'].apply(lambda x: convert_date(x))) wifi['available'] = wifi["wifi_data"].apply(lambda x: get_4th(x)) wifi["all_rssi"] = wifi["wifi_data"].apply(lambda x: get_rssi_list(x)) wifi["best_rssi"] = wifi["wifi_data"].apply(lambda x: get_best_rssi(x)) wifi["rssi_level"] = wifi["all_rssi"].apply(lambda x: level_rssi(x)) wifi[['excelent_rssi','veryGood_rssi','good_rssi','low_rssi','veryLow_rssi','poor_rssi','No Signal']] = pd.DataFrame(wifi.rssi_level.to_list(), index = wifi.index) wifi['frequency'] = wifi["wifi_data"].apply(lambda x: get_freq(x)) wifi[['2.4GHz','5GHz']] = pd.DataFrame(wifi.frequency.to_list(), index = wifi.index) cols = ['Timestamp', 'available', 'best_rssi', 'excelent_rssi', 'veryGood_rssi', 'low_rssi', 'veryLow_rssi', 'poor_rssi', '2.4GHz', '5GHz'] return wifi[cols] # Train train_wifi = read_wifi_data('/content/train/WiFi.txt') train_wifi.head() # Valid val_wifi = read_wifi_data('/content/valid/WiFi.txt') val_wifi.head() # Test test_wifi = read_wifi_data('/content/test/WiFi.txt') test_wifi.head() ``` ### Labels ``` # Train train_label = pd.read_csv('/content/train/Label.txt',delimiter='\t',header=None,names=['Timestamp','label']) train_label['Timestamp'] = pd.to_datetime(train_label.Timestamp, unit='ms') train_label.head() # Valid val_label = pd.read_csv('/content/valid/Label.txt',delimiter='\t',header=None,names=['Timestamp','label']) val_label['Timestamp'] = 
pd.to_datetime(val_label.Timestamp, unit='ms') val_label.head() # Test test_label_idx = pd.read_csv('/content/test/Label_idx.txt',delimiter='\n',header=None,names=['Timestamp']) test_label_idx['Timestamp'] = pd.to_datetime(test_label_idx.Timestamp, unit='ms') test_label_idx.head() ``` ### Merge All ``` # Train df = pd.merge(train_label, train_location, on='Timestamp', how='left') df = pd.merge(df, train_gps, on='Timestamp', how='left') df = pd.merge(df, train_cell, on='Timestamp', how='left') df = pd.merge(df, train_wifi, on='Timestamp', how='left') df.index = np.arange(len(df)) df.drop_duplicates(subset=['Timestamp'], inplace = True) df.head() # Valid df_val = pd.merge(val_label, val_location, on='Timestamp', how='left') df_val = pd.merge(df_val, val_gps, on='Timestamp', how='left') df_val = pd.merge(df_val, val_cell, on='Timestamp', how='left') df_val = pd.merge(df_val, val_wifi, on='Timestamp', how='left') df_val.index = np.arange(len(df_val)) df_val.drop_duplicates(subset=['Timestamp'], inplace = True) df_val.head() # Test df_test = pd.merge(test_label_idx, test_location, on='Timestamp', how='left') df_test = pd.merge(df_test, test_gps, on='Timestamp', how='left') df_test = pd.merge(df_test, test_cell, on='Timestamp', how='left') df_test = pd.merge(df_test, test_wifi, on='Timestamp', how='left') df_test.index = np.arange(len(df_test)) df_test.drop_duplicates(subset=['Timestamp'], inplace = True) df_test.head() ``` Save to drive ``` from google.colab import drive drive.mount('/content/drive') df.to_pickle('/content/drive/My Drive/SHL/data/df-v2.pickle') df_val.to_pickle('/content/drive/My Drive/SHL/data/df_val-v2.pickle') df_test.to_pickle('/content/drive/My Drive/SHL/data/df_test-v2.pickle') df = pd.read_pickle('/content/drive/My Drive/SHL/data/df-v1.pickle') df_val = pd.read_pickle('/content/drive/My Drive/SHL/data/df_val-v1.pickle') df_test = pd.read_pickle('/content/drive/My Drive/SHL/data/df_test-v1.pickle') ``` ### Handle NaN ``` # Train # NaN before 
df.isna().sum() # Valid # NaN before df_val.isna().sum() # Test # NaN before df_test.isna().sum() # Train df.fillna(method='ffill', limit=5, axis=0, inplace=True) df.fillna(method='bfill', limit=5, axis=0, inplace=True) # Valid df_val.fillna(method='ffill', limit=5, axis=0, inplace=True) df_val.fillna(method='bfill', limit=5, axis=0, inplace=True) # Test df_test.fillna(method='ffill', limit=5, axis=0, inplace=True) df_test.fillna(method='bfill', limit=5, axis=0, inplace=True) # Train # NaN after df.isna().sum() # Valid # NaN after df_val.isna().sum() # Test # NaN after df_test.isna().sum() ``` Don't drop remaining NaN values. ``` df.head() ``` ## Feature Analysis ### Features based on location Timewise segmentation for calculating velocity acceleration ...If Time interval between two row greater than 30s/1min/1h then divide ``` def timewise_segmentation(data): from tqdm import notebook as tqdm dataframe_list=[] segmented_dataframe=[] for i in tqdm.tqdm(np.arange(len(data)-1)): timedelta=pd.to_datetime(data.iloc[i+1]['Timestamp'])-pd.to_datetime(data.iloc[i]['Timestamp']) segmented_dataframe.append(data.iloc[i]) if timedelta< pd.Timedelta('30s'): continue elif timedelta>= pd.Timedelta('30s'): dataframe=pd.DataFrame(segmented_dataframe) dataframe_list.append(dataframe) segmented_dataframe=[] segmented_dataframe.append(data.iloc[-1]) dataframe=pd.DataFrame(segmented_dataframe) dataframe_list.append(dataframe) return dataframe_list train_segmented_list = timewise_segmentation(df) val_segmented_list = timewise_segmentation(df_val) test_segmented_list = timewise_segmentation(df_test) def convert_to_xy(latitude, longitude): import math latRad = latitude * (math.pi)/180 lonRad = longitude * (math.pi)/180 earthRadius = 6367000 posX = earthRadius * math.cos(latRad) * math.cos(lonRad) posY = earthRadius * math.cos(latRad) * math.sin(lonRad) return {'x': posX,'y': posY} def calculate_hsdist_from_pos(loc1,loc2): from haversine import Unit import haversine as hs return 
hs.haversine(loc1,loc2,unit=Unit.METERS) def calc_angle(delx,dely,delz): import math vector_2=[delx,dely,delz] if np.linalg.norm(vector_2)==0: return 0 else: unit_vector_1 = [0,1,0] unit_vector_2 = vector_2 / np.linalg.norm(vector_2) dot_product = np.dot(unit_vector_1, unit_vector_2) angle = np.arccos(dot_product)*180/np.pi return angle def make_features(list): import nvector as nv from tqdm import notebook as tqdm for data in tqdm.tqdm(list): data['distance']=0 data['velocity']=0 data['acceleration']=0 data['vec_x']=0 data['vec_y']=0 data['vec_z']=0 data['vec_azimuth']=0 data['heading_vector']=0 data['vel_change_rate']=0 data.index=np.arange(len(data)) for i in np.arange(len(data)-1): loc1=(float(data.loc[i,'latitude']),float(data.loc[i,'longitude'])) loc2=(float(data.loc[i+1,'latitude']),float(data.loc[i+1,'longitude'])) dist=calculate_hsdist_from_pos(loc1,loc2) data.loc[i+1,'distance']=dist del_t=(pd.to_datetime(data.loc[i+1,'Timestamp'])-pd.to_datetime(data.loc[i,'Timestamp'])).seconds v=dist/del_t data.loc[i+1,'velocity']=v a=(data.loc[i+1,'velocity']-data.loc[i,'velocity'])/del_t data.loc[i+1,'acceleration']=a wgs84 = nv.FrameE(name='WGS84') pointA = wgs84.GeoPoint(latitude=float(data.loc[i,'latitude']), longitude=float(data.loc[i,'longitude']), z=float(data.loc[i,'altitude']), degrees=True) pointB = wgs84.GeoPoint(latitude=float(data.loc[i+1,'latitude']), longitude=float(data.loc[i+1,'longitude']), z=float(data.loc[i+1,'altitude']), degrees=True) p_AB_N = pointA.delta_to(pointB) x, y, z = p_AB_N.pvector.ravel() azimuth = p_AB_N.azimuth_deg az=float('{0:4.2f}'.format(azimuth)) data.loc[i+1,'vec_x']=x data.loc[i+1,'vec_y']=y data.loc[i+1,'vec_z']=z data.loc[i+1,'vec_azimuth']=az data.loc[i+1,'heading_vector']=calc_angle(x,y,z) if data.loc[i,'velocity']!=0: data.loc[i+1,'vel_change_rate']=(data.loc[i+1,'velocity']-data.loc[i,'velocity'])/data.loc[i,'velocity'] elif data.loc[i,'velocity']==0: 
data.loc[i+1,'vel_change_rate']=data.loc[i+1,'velocity']-data.loc[i,'velocity'] make_features(train_segmented_list) make_features(val_segmented_list) make_features(test_segmented_list) train=pd.concat([i for i in train_segmented_list], ignore_index = True) val = pd.concat([i for i in val_segmented_list], ignore_index = True) test = pd.concat([i for i in test_segmented_list], ignore_index = True) train.head() val.head() ``` ### Save to drive ``` from google.colab import drive drive.mount('/content/drive') train.to_pickle('/content/drive/MyDrive/SHL/data/train-v2.pickle') val.to_pickle('/content/drive/MyDrive/SHL/data/val-v2.pickle') test.to_pickle('/content/drive/MyDrive/SHL/data/test-v2.pickle') import pandas as pd import numpy as np train = pd.read_pickle('/content/drive/MyDrive/SHL/data/train-v2.pickle') val = pd.read_pickle('/content/drive/MyDrive/SHL/data/val-v2.pickle') test = pd.read_pickle('/content/drive/MyDrive/SHL/data/test-v2.pickle') ``` ### Features based on trajectory ``` def get_traj(df): from tqdm import notebook as tqdm ranges = [] time_of_flights = [] for row in tqdm.tqdm(range(len(df))): range_, time_of_flight = calculateTraf(df.loc[row, 'heading_vector'], df.loc[row, 'velocity']) ranges.append(range_) time_of_flights.append(time_of_flight) traj_df = pd.DataFrame([ranges, time_of_flights]).T traj_df.columns = ['range', 'time_of_flight'] final_df = pd.concat([df, traj_df], axis=1) return final_df from math import sin,cos,pi def calculateTraf(angle, velocity ): #defining gravity gravity = float(9.8) #converting angle to radians angle =angle * pi / 180 #calculating horizontal and vertical components of the velocity velocity_h= velocity*cos(angle) velocity_v = velocity*sin(angle) #computing time and distance of flight time_of_flight =2 * float(velocity_v) / gravity range = float(time_of_flight) * velocity_h return range, time_of_flight train_traj = get_traj(train) val_traj = get_traj(val) test_traj = get_traj(test) ``` ### Features based on cell ``` 
def ctype_connection(cell): from tqdm import notebook as tqdm lte_list_0, gsm_list_0, wcdma_list_0, lte_list_1, gsm_list_1, wcdma_list_1 = [], [], [], [], [], [] for row in tqdm.tqdm(range(cell.shape[0])): lte_0, gsm_0, wcdma_0, lte_1, gsm_1, wcdma_1 = [], [], [], [], [], [] for col in range(cell.shape[1]-1): ctype = cell.loc[row, col] if ctype == None: continue a = cell.loc[row,col+1] if (ctype == 'LTE') and (a == '0'): # if (cell.loc[row,col] == 'LTE/GSM/WCDMA') and (cell.loc[row,col+1] == '0/1') lte_0.append(a) elif (ctype == 'GSM') and (a == '0'): gsm_0.append(a) elif (ctype == 'WCDMA') and (a == '0'): wcdma_0.append(a) elif (ctype == 'LTE') and (a == '1'): lte_1.append(a) elif (ctype == 'GSM') and (a == '1'): gsm_1.append(a) elif (ctype == 'WCDMA') and (a == '1'): wcdma_1.append(a) lte_list_0.append(len(lte_0)) gsm_list_0.append(len(gsm_0)) wcdma_list_0.append(len(wcdma_0)) lte_list_1.append(len(lte_1)) gsm_list_1.append(len(gsm_1)) wcdma_list_1.append(len(wcdma_1)) return [lte_list_0, gsm_list_0, wcdma_list_0, lte_list_1, gsm_list_1, wcdma_list_1] def get_connection_features_from_cell(path, df_concat): import gc df = pd.read_csv(path, delimiter='\t', header=None, names=['col']) df = df['col'].str.split(expand=True) features = ctype_connection(df) cols = ['LTE_no_connection', 'GSM_no_connection', 'WCDMA_no_connection', 'LTE_connection', 'GSM_connection', 'WCDMA_connection'] cell = pd.DataFrame(features, index=None, columns=None).T cell.columns = cols cell['Timestamp'] = df.loc[:, 0] cell['Timestamp'] = pd.to_datetime(cell.Timestamp, unit='ms') cell['Timestamp'] = pd.to_datetime(cell['Timestamp'].apply(lambda x: convert_date(x))) cell.fillna(method='ffill', limit=5, axis=0, inplace=True) cell.fillna(method='bfill', limit=5, axis=0, inplace=True) df_concat = pd.merge(df_concat, cell, on='Timestamp', how='left') df_concat.drop_duplicates(subset='Timestamp', inplace=True) df_concat.index = np.arange(len(df_concat)) del cell, df, features gc.collect() return 
df_concat train_prepared = get_connection_features_from_cell('/content/train/Cells.txt', train_traj) val_prepared = get_connection_features_from_cell('/content/valid/Cells.txt', val_traj) test_prepared = get_connection_features_from_cell('/content/test/Cells.txt', test_traj) ``` ### Features based on frequency domain ``` def convert_to_frequency_domain(df, cols): from scipy import fftpack psd_list = [] for col in cols: fft = pd.DataFrame(fftpack.fft(pd.DataFrame(df[col])), columns=['psd_' + col]) psd_list.append(np.abs(fft) ** 2) fft_freq = pd.DataFrame(fftpack.fftfreq(len(psd_list)), columns=['frequency']) return pd.concat([df] + [psd for psd in psd_list] + [fft_freq], axis=1) train_prepared = convert_to_frequency_domain(train_prepared, train_prepared.columns[2:]) val_prepared = convert_to_frequency_domain(val_prepared, val_prepared.columns[2:]) test_prepared = convert_to_frequency_domain(test_prepared, test_prepared.columns[1:]) test_prepared.shape, train_prepared.shape, val_prepared.shape test_prepared.head() ``` ### Save to drive ``` from google.colab import drive drive.mount('/content/drive') train_prepared.to_pickle('/content/drive/My Drive/SHL/data/train_prepared-v4.pickle') val_prepared.to_pickle('/content/drive/My Drive/SHL/data/val_prepared-v4.pickle') test_prepared.to_pickle('/content/drive/My Drive/SHL/data/test_prepared-v4.pickle') import pandas as pd import numpy as np train_prepared=pd.read_pickle('/content/drive/MyDrive/SHL/data/train_prepared-v2.pickle') val_prepared=pd.read_pickle('/content/drive/MyDrive/SHL/data/val_prepared-v2.pickle') test_prepared=pd.read_pickle('/content/drive/MyDrive/SHL/data/test_prepared-v2.pickle') ```
github_jupyter
# Law of Large Numbers
The law of large numbers points out that as the number of trials or observations increases, the observed (actual) probability approaches the expected mean, i.e. the theoretical value.
### Uses of LNN
It is used both in the context of business and finance. In the context of business this is observed as growth rates more or less converging to the growth rate of the economy. Similarly, in the financial context, companies with huge market capitalization more or less become stagnant and don't really see the growth observed previously.
### Limitation
The average of the results obtained from a large number of trials may fail to converge in some cases. For instance, the average of n results taken from the Cauchy distribution or some Pareto distributions (α<1) will not converge as n becomes larger; the reason is heavy tails.
### Example:
A good example in which the Law of Large Numbers can be observed is the flipping of a coin. While we may theoretically deduce that there is a 0.5 probability each of heads and tails coming up, if we do few tosses, say 10 tosses, then we can see that the observed probability does not match the theoretical one. There might be a higher probability towards one of the two outcomes. But as we increase the number of tosses, it slowly comes closer to the theoretical value. On the other hand, if the coin is flipped a large number of times, say 10000 times, then the probability of getting a heads is much closer to the expected value of 0.5.
```
import matplotlib.pyplot as plt
import random
```
Let's create a random set of rolls of a die. Theoretically each face has a chance of 1/6 = 0.167 of coming up. We first run this for 100 tosses and then for 1000 tosses to see the difference. If there are fewer tosses there might be more chance of any one side coming up, but doing it a lot of times leads us closer to the theoretical value.
``` # Case1: 100 times of rolling dice n=1 probability_of_one=[] tosses=[] while n<100: one_outcome=0 rest_outcome=0 for i in range(n): if random.randint(1,6)==1: one_outcome+=1 else: rest_outcome+=1 prob=one_outcome/(one_outcome+rest_outcome) probability_of_one. append(prob) tosses.append(n) n+=1 plt.subplot(2,1,1) plt.hist(probability_of_one,100,label='Probality of Ones') plt.legend() plt.subplot(2,1,2) plt.plot(tosses,probability_of_one) plt.xlabel('Number of Tosses') plt.ylabel('Probability of Ones') plt.grid(True) plt.show() # Case2: 1000 times of rolling dice n=1 probability_of_one=[] tosses=[] while n<1000: one_outcome=0 rest_outcome=0 for i in range(n): if random.randint(1,6)==1: one_outcome+=1 else: rest_outcome+=1 prob=one_outcome/(one_outcome+rest_outcome) probability_of_one. append(prob) tosses.append(n) n+=1 plt.subplot(2,1,1) plt.hist(probability_of_one,100,label='Probality of Ones') plt.legend() plt.subplot(2,1,2) plt.plot(tosses,probability_of_one) plt.xlabel('Number of Tosses') plt.ylabel('Probability of Ones') plt.grid(True) plt.show() ``` Hence we can observe the difference between the two sets of graphs. As the number of tosses increased the probability distribution becomes less scattered and gets closer to theoretical value of 0.167. # Difference between CLT and LNN The law of large numbers as well as the central limit theorem are partial solutions to a general problem: “What is the limiting behaviour of sample mean (S_n) as sample size (n) approaches infinity?” # CLT The sample mean will approximately be normally distributed for large sample sizes, regardless of the distribution from which we are sampling. ``` from IPython.display import Image Image("C:/Users/pksds/Desktop/IllustrationCentralTheorem2.png") import numpy import matplotlib.pyplot as plt # number of sample num = [1, 10, 50, 100] # list of sample means means = [] # Generating 1, 10, 30, 100 random numbers from -40 to 40 # taking their mean and appending it to list means. 
for j in num:
    # Generating seed so that we can get same result
    # every time the loop is run...
    numpy.random.seed(1)
    x = [numpy.mean( numpy.random.randint( -40, 40, j)) for _i in range(1000)]
    means.append(x)

k = 0
# plotting all the means in one figure
fig, ax = plt.subplots(2, 2, figsize =(8, 8))
for i in range(0, 2):
    for j in range(0, 2):
        # Histogram for each x stored in means
        ax[i, j].hist(means[k], 10, density = True)
        ax[i, j].set_title(label = num[k])
        k = k + 1
```
It is evident from the graphs that as we keep on increasing the sample size from 1 to 100, the histogram tends to take the shape of a normal distribution.
# LNN
The Law of Large Numbers (of which there are several variants, for example the Strong Law of Large Numbers and the Weak Law of Large Numbers) states that the sample average converges to the Expected Value of the RV as n tends to infinity. Again, IID RVs are assumed. An instance of the sample mean of size n tends to be closer and closer to the population mean µ as n → ∞.
```
import matplotlib.pyplot as plt
import random

n=1
total_trails=100
while n<total_trails:
    head=0
    tail=0
    for i in range(n):
        if random.randint(0,1)==0:
            head+=1
        else:
            tail+=1
    k=head/(head+tail)
    n+=1
probability=head/total_trails
probability

import matplotlib.pyplot as plt
import random

n=1
total_trails=5000
while n<total_trails:
    head=0
    tail=0
    for i in range(n):
        if random.randint(0,1)==0:
            head+=1
        else:
            tail+=1
    k=head/(head+tail)
    n+=1
probability=head/total_trails
probability
```
As the number of trials increases, the probability of getting heads approaches 0.5. Here, when the total number of trials is 100 the probability is 0.47, whereas when the total number of trials is 5000 the probability is 0.491, which is close to 0.5 (the ideal case). Hence LNN states that the sample average converges to the Expected Value of the RV as n tends to infinity.
# Conclusion
LLN and CLT both try to approximately tell us the behaviour of the sample mean. CLT gives us the approximate shape of the distribution.
The LLN just talks about the approximate value of the sample mean, which of course becomes closer and closer to the population mean as 'n' becomes large. There is really one key difference between the Law of Large Numbers and the Central Limit Theorem: the way they are normalized is different. The LLN is normalized by dividing by N, the number of independent summands, which is a way of normalizing to the mean; while the CLT is normalized by dividing by √N, which is a way of normalizing to the power. That is why the LLN will show up as a mean, while the CLT will show up as a distribution.
github_jupyter
# Clustering with KMeans in Shogun Machine Learning Toolbox #### Notebook by Parijat Mazumdar (GitHub ID: <a href='https://github.com/mazumdarparijat'>mazumdarparijat</a>) This notebook demonstrates <a href="http://en.wikipedia.org/wiki/K-means_clustering">clustering with KMeans</a> in Shogun along with its initialization and training. The initialization of cluster centres is shown manually, randomly and using the <a href="http://en.wikipedia.org/wiki/K-means%2B%2B">KMeans++</a> algorithm. Training is done via the classical <a href="http://en.wikipedia.org/wiki/Lloyd%27s_algorithm">Lloyds</a> and mini-batch KMeans method. It is then applied to a real world data set. Furthermore, the effect of dimensionality reduction using <a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a> is analysed on the KMeans algorithm. ## KMeans - An Overview The <a href="http://en.wikipedia.org/wiki/K-means_clustering">KMeans clustering algorithm</a> is used to partition a space of n observations into k partitions (or clusters). Each of these clusters is denoted by the mean of the observation vectors belonging to it and a unique label which is attached to all the observations belonging to it. Thus, in general, the algorithm takes parameter k and an observation matrix (along with the notion of distance between points ie <i>distance metric</i>) as input and returns mean of each of the k clusters along with labels indicating belongingness of each observations. Let us construct a simple example to understand how it is done in Shogun using the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CKMeans.html">CKMeans</a> class. Let us start by creating a toy dataset. 
``` from numpy import concatenate, array from numpy.random import randn import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') num = 200 d1 = concatenate((randn(1,num),10.*randn(1,num)),0) d2 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[0.]]) d3 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[0.],[100.]]) d4 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[100.]]) rectangle = concatenate((d1,d2,d3,d4),1) totalPoints = 800 ``` The toy data created above consists of 4 gaussian blobs, having 200 points each, centered around the vertices of a rectancle. Let's plot it for convenience. ``` import matplotlib.pyplot as pyplot %matplotlib inline figure,axis = pyplot.subplots(1,1) axis.plot(rectangle[0], rectangle[1], 'o', color='r', markersize=5) axis.set_xlim(-5,15) axis.set_ylim(-50,150) axis.set_title('Toy data : Rectangle') pyplot.show() ``` With data at our disposal, it is time to apply KMeans to it using the KMeans class in Shogun. First we construct Shogun features from our data: ``` from shogun import * import shogun as sg train_features = features(rectangle) ``` Next we specify the number of clusters we want and create a distance object specifying the distance metric to be used over our data for our KMeans training: ``` # number of clusters k = 2 # distance metric over feature matrix - Euclidean distance distance = sg.distance('EuclideanDistance') distance.init(train_features, train_features) ``` Next, we create a KMeans object with our desired inputs/parameters and train: ``` # KMeans object created kmeans = KMeans(k, distance) # KMeans training kmeans.train() ``` Now that training has been done, let's get the cluster centers and label for each data point ``` # cluster centers centers = kmeans.get_cluster_centers() # Labels for data points result = kmeans.apply() ``` Finally let us plot the centers and the data points (in different colours for different clusters): ``` def plotResult(title = 'KMeans Plot'): 
figure,axis = pyplot.subplots(1,1) for i in range(totalPoints): if result[i]==0.0: axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='g', markersize=3) else: axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='y', markersize=3) axis.plot(centers[0,0], centers[1,0], 'ko', color='g', markersize=10) axis.plot(centers[0,1], centers[1,1], 'ko', color='y', markersize=10) axis.set_xlim(-5,15) axis.set_ylim(-50,150) axis.set_title(title) pyplot.show() plotResult('KMeans Results') ``` <b>Note:</b> You might not get the perfect result always. That is an inherent flaw of KMeans algorithm. In subsequent sections, we will discuss techniques which allow us to counter this.<br> Now that we have already worked out a simple KMeans implementation, it's time to understand certain specifics of KMeans implementaion and the options provided by Shogun to its users. ## Initialization of cluster centers The KMeans algorithm requires that the cluster centers are initialized with some values. Shogun offers 3 ways to initialize the clusters. <ul><li>Random initialization (default)</li><li>Initialization by hand</li><li>Initialization using <a href="http://en.wikipedia.org/wiki/K-means%2B%2B">KMeans++ algorithm</a></li></ul>Unless the user supplies initial centers or tells Shogun to use KMeans++, Random initialization is the default method used for cluster center initialization. This was precisely the case in the example discussed above. ### Initialization by hand There are 2 ways to initialize centers by hand. 
One way is to pass on the centers during KMeans object creation, as follows: ``` from numpy import array initial_centers = array([[0.,10.],[50.,50.]]) # initial centers passed kmeans = KMeans(k, distance, initial_centers) ``` Now, let's first get results by repeating the rest of the steps: ``` # KMeans training kmeans.train(train_features) # cluster centers centers = kmeans.get_cluster_centers() # Labels for data points result = kmeans.apply() # plot the results plotResult('Hand initialized KMeans Results 1') ``` The other way to initialize centers by hand is as follows: ``` new_initial_centers = array([[5.,5.],[0.,100.]]) # set new initial centers kmeans.set_initial_centers(new_initial_centers) ``` Let's complete the rest of the code to get results. ``` # KMeans training kmeans.train(train_features) # cluster centers centers = kmeans.get_cluster_centers() # Labels for data points result = kmeans.apply() # plot the results plotResult('Hand initialized KMeans Results 2') ``` Note the difference that inititial cluster centers can have on final result. ### Initializing using KMeans++ algorithm In Shogun, a user can also use <a href="http://en.wikipedia.org/wiki/K-means%2B%2B">KMeans++ algorithm</a> for center initialization. Using KMeans++ for center initialization is beneficial because it reduces total iterations used by KMeans and also the final centers mostly correspond to the global minima, which is often not the case with KMeans with random initialization. 
One of the ways to use KMeans++ is to set flag as <i>true</i> during KMeans object creation, as follows: ``` # set flag for using KMeans++ kmeans = KMeans(k, distance, True) ``` The other way to initilize using KMeans++ is as follows: ``` # set KMeans++ flag kmeans.set_use_kmeanspp(True) ``` Completing rest of the steps to get result: ``` # KMeans training kmeans.train(train_features) # cluster centers centers = kmeans.get_cluster_centers() # Labels for data points result = kmeans.apply() # plot the results plotResult('KMeans with KMeans++ Results') ``` To switch back to random initialization, you may use: ``` #unset KMeans++ flag kmeans.set_use_kmeanspp(False) ``` ## Training Methods Shogun offers 2 training methods for KMeans clustering:<ul><li><a href='http://en.wikipedia.org/wiki/K-means_clustering#Standard_algorithm'>Classical Lloyd's training</a> (default)</li><li><a href='http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf'>mini-batch KMeans training</a></li></ul>Lloyd's training method is used by Shogun by default unless user switches to mini-batch training method. ### Mini-Batch KMeans Mini-batch KMeans is very useful in case of extremely large datasets and/or very high dimensional data which is often the case in text mining. One can switch to Mini-batch KMeans training while creating KMeans object as follows: ``` # set training method to mini-batch kmeans = KMeansMiniBatch(k, distance) ``` In mini-batch KMeans it is compulsory to set batch-size and number of iterations. These parameters can be set together or one after the other. ``` # set both parameters together batch size-2 and no. of iterations-100 kmeans.set_mb_params(2,100) # OR # set batch size-2 kmeans.set_batch_size(2) # set no. 
of iterations-100 kmeans.set_mb_iter(100) ``` Completing the code to get results: ``` # KMeans training kmeans.train(train_features) # cluster centers centers = kmeans.get_cluster_centers() # Labels for data points result = kmeans.apply() # plot the results plotResult('Mini-batch KMeans Results') ``` ## Applying KMeans on Real Data In this section we see how useful KMeans can be in classifying the different varieties of Iris plant. For this purpose, we make use of Fisher's Iris dataset borrowed from the <a href='http://archive.ics.uci.edu/ml/datasets/Iris'>UCI Machine Learning Repository</a>. There are 3 varieties of Iris plants <ul><li>Iris Sensosa</li><li>Iris Versicolour</li><li>Iris Virginica</li></ul> The Iris dataset enlists 4 features that can be used to segregate these varieties, namely <ul><li>sepal length</li><li>sepal width</li><li>petal length</li><li>petal width</li></ul> It is additionally acknowledged that petal length and petal width are the 2 most important features (ie. features with very high class correlations)[refer to <a href='http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names'>summary statistics</a>]. Since the entire feature vector is impossible to plot, we only plot these two most important features in order to understand the dataset (at least partially). Note that we could have extracted the 2 most important features by applying PCA (or any one of the many dimensionality reduction methods available in Shogun) as well. 
``` f = open(os.path.join(SHOGUN_DATA_DIR, 'uci/iris/iris.data')) feats = [] # read data from file for line in f: words = line.rstrip().split(',') feats.append([float(i) for i in words[0:4]]) f.close() # create observation matrix obsmatrix = array(feats).T # plot the data figure,axis = pyplot.subplots(1,1) # First 50 data belong to Iris Sentosa, plotted in green axis.plot(obsmatrix[2,0:50], obsmatrix[3,0:50], 'o', color='green', markersize=5) # Next 50 data belong to Iris Versicolour, plotted in red axis.plot(obsmatrix[2,50:100], obsmatrix[3,50:100], 'o', color='red', markersize=5) # Last 50 data belong to Iris Virginica, plotted in blue axis.plot(obsmatrix[2,100:150], obsmatrix[3,100:150], 'o', color='blue', markersize=5) axis.set_xlim(-1,8) axis.set_ylim(-1,3) axis.set_title('3 varieties of Iris plants') pyplot.show() ``` In the above plot we see that the data points labelled Iris Sentosa form a nice separate cluster of their own. But in case of other 2 varieties, while the data points of same label do form clusters of their own, there is some mixing between the clusters at the boundary. Now let us apply KMeans algorithm and see how well we can extract these clusters. 
``` def apply_kmeans_iris(data): # wrap to Shogun features train_features = features(data) # number of cluster centers = 3 k = 3 # distance function features - euclidean distance = sg.distance('EuclideanDistance') distance.init(train_features, train_features) # initialize KMeans object kmeans = KMeans(k, distance) # use kmeans++ to initialize centers [play around: change it to False and compare results] kmeans.set_use_kmeanspp(True) # training method is Lloyd by default [play around: change it to mini-batch by uncommenting the following lines] #kmeans.set_train_method(KMM_MINI_BATCH) #kmeans.set_mbKMeans_params(20,30) # training kmeans kmeans.train(train_features) # labels for data points result = kmeans.apply() return result result = apply_kmeans_iris(obsmatrix) ``` Now let us create a 2-D plot of the clusters formed making use of the two most important features (petal length and petal width) and compare it with the earlier plot depicting the actual labels of data points. ``` # plot the clusters over the original points in 2 dimensions figure,axis = pyplot.subplots(1,1) for i in range(150): if result[i]==0.0: axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='r', markersize=5) elif result[i]==1.0: axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='g', markersize=5) else: axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='b', markersize=5) axis.set_xlim(-1,8) axis.set_ylim(-1,3) axis.set_title('Iris plants clustered based on attributes') pyplot.show() ``` From the above plot, it can be inferred that the accuracy of KMeans algorithm is very high for Iris dataset. Don't believe me? Alright, then let us make use of one of Shogun's clustering evaluation techniques to formally validate the claim. But before that, we have to label each sample in the dataset with a label corresponding to the class to which it belongs. 
``` from numpy import ones, zeros # first 50 are iris sensosa labelled 0, next 50 are iris versicolour labelled 1 and so on labels = concatenate((zeros(50),ones(50),2.*ones(50)),0) # bind labels assigned to Shogun multiclass labels ground_truth = MulticlassLabels(array(labels,dtype='float64')) ``` Now we can compute clustering accuracy making use of the [ClusteringAccuracy class](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CClusteringAccuracy.html) in Shogun ``` from numpy import nonzero def analyzeResult(result): # shogun object for clustering accuracy AccuracyEval = ClusteringAccuracy() # changes the labels of result (keeping clusters intact) to produce a best match with ground truth AccuracyEval.best_map(result, ground_truth) # evaluates clustering accuracy accuracy = AccuracyEval.evaluate(result, ground_truth) # find out which sample points differ from actual labels (or ground truth) compare = result.get_labels()-labels diff = nonzero(compare) return (diff,accuracy) (diff,accuracy_4d) = analyzeResult(result) print('Accuracy : ' + str(accuracy_4d)) # plot the difference between ground truth and predicted clusters figure,axis = pyplot.subplots(1,1) axis.plot(obsmatrix[2,:],obsmatrix[3,:],'x',color='black', markersize=5) axis.plot(obsmatrix[2,diff],obsmatrix[3,diff],'x',color='r', markersize=7) axis.set_xlim(-1,8) axis.set_ylim(-1,3) axis.set_title('Difference') pyplot.show() ``` In the above plot, wrongly clustered data points are marked in red. We see that the Iris Sentosa plants are perfectly clustered without error. The Iris Versicolour plants and Iris Virginica plants are also clustered with high accuracy, but there are some plant samples of either class that have been clustered with the wrong class. This happens near the boundary of the 2 classes in the plot and was well expected. Having mastered KMeans, it's time to move on to next interesting topic. 
## PCA as a preprocessor to KMeans KMeans is highly affected by the <i>curse of dimensionality</i>. So, dimension reduction becomes an important preprocessing step. Shogun offers a variety of [dimension reduction techniques](http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDimensionReductionPreprocessor.html) to choose from. Since our data is not very high dimensional, PCA is a good choice for dimension reduction. We have already seen the accuracy of KMeans when all four dimensions are used. In the following exercise we shall see how the accuracy varies as one chooses lower dimensions to represent data. ### 1-Dimensional representation Let us first apply PCA to reduce training features to 1 dimension ``` from numpy import dot def apply_pca_to_data(target_dims): train_features = features(obsmatrix) submean = PruneVarSubMean(False) submean.init(train_features) submean.apply_to_feature_matrix(train_features) preprocessor = PCA() preprocessor.set_target_dim(target_dims) preprocessor.init(train_features) pca_transform = preprocessor.get_transformation_matrix() new_features = dot(pca_transform.T, train_features) return new_features oneD_matrix = apply_pca_to_data(1) ``` Next, let us get an idea of the data in 1-D by plotting it. ``` figure,axis = pyplot.subplots(1,1) # First 50 data belong to Iris Sentosa, plotted in green axis.plot(oneD_matrix[0,0:50], zeros(50), 'o', color='green', markersize=5) # Next 50 data belong to Iris Versicolour, plotted in red axis.plot(oneD_matrix[0,50:100], zeros(50), 'o', color='red', markersize=5) # Last 50 data belong to Iris Virginica, plotted in blue axis.plot(oneD_matrix[0,100:150], zeros(50), 'o', color='blue', markersize=5) axis.set_xlim(-5,5) axis.set_ylim(-1,1) axis.set_title('3 varieties of Iris plants') pyplot.show() ``` Let us now apply KMeans to the 1-D data to get clusters. ``` result = apply_kmeans_iris(oneD_matrix) ``` Now that we have the results, the inevitable step is to check how good these results are. 
``` (diff,accuracy_1d) = analyzeResult(result) print('Accuracy : ' + str(accuracy_1d)) # plot the difference between ground truth and predicted clusters figure,axis = pyplot.subplots(1,1) axis.plot(oneD_matrix[0,:],zeros(150),'x',color='black', markersize=5) axis.plot(oneD_matrix[0,diff],zeros(len(diff)),'x',color='r', markersize=7) axis.set_xlim(-5,5) axis.set_ylim(-1,1) axis.set_title('Difference') pyplot.show() ``` ### 2-Dimensional Representation We follow the same steps as above and get the clustering accuracy. STEP 1 : Apply PCA and plot the data (plotting is optional) ``` twoD_matrix = apply_pca_to_data(2) figure,axis = pyplot.subplots(1,1) # First 50 data belong to Iris Sentosa, plotted in green axis.plot(twoD_matrix[0,0:50], twoD_matrix[1,0:50], 'o', color='green', markersize=5) # Next 50 data belong to Iris Versicolour, plotted in red axis.plot(twoD_matrix[0,50:100], twoD_matrix[1,50:100], 'o', color='red', markersize=5) # Last 50 data belong to Iris Virginica, plotted in blue axis.plot(twoD_matrix[0,100:150], twoD_matrix[1,100:150], 'o', color='blue', markersize=5) axis.set_title('3 varieties of Iris plants') pyplot.show() ``` STEP 2 : Apply KMeans to obtain clusters ``` result = apply_kmeans_iris(twoD_matrix) ``` STEP 3: Get the accuracy of the results ``` (diff,accuracy_2d) = analyzeResult(result) print('Accuracy : ' + str(accuracy_2d)) # plot the difference between ground truth and predicted clusters figure,axis = pyplot.subplots(1,1) axis.plot(twoD_matrix[0,:],twoD_matrix[1,:],'x',color='black', markersize=5) axis.plot(twoD_matrix[0,diff],twoD_matrix[1,diff],'x',color='r', markersize=7) axis.set_title('Difference') pyplot.show() ``` ### 3-Dimensional Representation Again, we follow the same steps, but skip plotting data. STEP 1: Apply PCA to data ``` threeD_matrix = apply_pca_to_data(3) ``` STEP 2: Apply KMeans to 3-D representation of data ``` result = apply_kmeans_iris(threeD_matrix) ``` STEP 3: Get accuracy of results. 
In this step, the 'difference' plot positions data points based petal length and petal width in the original data. This will enable us to visually compare these results with that of KMeans applied to 4-Dimensional data (ie. our first result on Iris dataset) ``` (diff,accuracy_3d) = analyzeResult(result) print('Accuracy : ' + str(accuracy_3d)) # plot the difference between ground truth and predicted clusters figure,axis = pyplot.subplots(1,1) axis.plot(obsmatrix[2,:],obsmatrix[3,:],'x',color='black', markersize=5) axis.plot(obsmatrix[2,diff],obsmatrix[3,diff],'x',color='r', markersize=7) axis.set_title('Difference') axis.set_xlim(-1,8) axis.set_ylim(-1,3) pyplot.show() ``` Finally, let us plot clustering accuracy vs. number of dimensions to consolidate our results. ``` from scipy.interpolate import interp1d from numpy import linspace x = array([1, 2, 3, 4]) y = array([accuracy_1d, accuracy_2d, accuracy_3d, accuracy_4d]) f = interp1d(x, y) xnew = linspace(1,4,10) pyplot.plot(x,y,'o',xnew,f(xnew),'-') pyplot.xlim([0,5]) pyplot.xlabel('no. of dims') pyplot.ylabel('Clustering Accuracy') pyplot.title('PCA Results') pyplot.show() ``` The above plot is not very intuitive theoretically. The accuracy obtained by using just one latent dimension is much more than that obtained by taking all four features features. A plausible explanation could be that the mixing of data points from Iris Versicolour and Iris Virginica is least along the single principal dimension chosen by PCA. Additional dimensions only aggrevate this inter-mixing, thus resulting in poorer clustering accuracy. While there could be other explanations to the observed results, our small experiment has successfully highlighted the importance of PCA. Not only does it reduce the complexity of running KMeans, it also enhances results at times. ## References [1] D. Sculley. Web-scale k-means clustering. In Proceedings of the 19th international conference on World wide web, pages 1177–1178. ACM, 2010 [2] Bishop, C. 
M. (2006). Pattern Recognition and Machine Learning. Springer, New York. [3] Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
github_jupyter
Deep Learning ============= Assignment 2 ------------ Previously in `1_notmnist.ipynb`, we created a pickle with formatted datasets for training, development and testing on the [notMNIST dataset](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html). The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow. ``` # These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import numpy as np import tensorflow as tf from six.moves import cPickle as pickle from six.moves import range ``` First reload the data we generated in `1_notmnist.ipynb`. ``` pickle_file = '../2_DeepLearningPreliminaries/notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) ``` Reformat into a shape that's more adapted to the models we're going to train: - data as a flat matrix, - labels as float 1-hot encodings. ``` image_size = 28 num_labels = 10 def reformat(dataset, labels): dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32) # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...] 
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32) return dataset, labels train_dataset, train_labels = reformat(train_dataset, train_labels) valid_dataset, valid_labels = reformat(valid_dataset, valid_labels) test_dataset, test_labels = reformat(test_dataset, test_labels) print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) ``` We're first going to train a multinomial logistic regression using simple gradient descent. TensorFlow works like this: * First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below: with graph.as_default(): ... * Then you can run the operations on this graph as many times as you want by calling `session.run()`, providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below: with tf.Session(graph=graph) as session: ... Let's load all the data into TensorFlow and build the computation graph corresponding to our training: ``` # With gradient descent training, even this much data is prohibitive. # Subset the training data for faster turnaround. train_subset = 10000 graph = tf.Graph() with graph.as_default(): # Input data. # Load the training, validation and test data into constants that are # attached to the graph. tf_train_dataset = tf.constant(train_dataset[:train_subset, :]) tf_train_labels = tf.constant(train_labels[:train_subset]) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. # These are the parameters that we are going to be training. The weight # matrix will be initialized using random values following a (truncated) # normal distribution. The biases get initialized to zero. 
weights = tf.Variable( tf.truncated_normal([image_size * image_size, num_labels])) biases = tf.Variable(tf.zeros([num_labels])) # Training computation. # We multiply the inputs with the weight matrix, and add biases. We compute # the softmax and cross-entropy (it's one operation in TensorFlow, because # it's very common, and it can be optimized). We take the average of this # cross-entropy across all training examples: that's our loss. logits = tf.matmul(tf_train_dataset, weights) + biases loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits)) # Optimizer. # We are going to find the minimum of this loss using gradient descent. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. # These are not part of training, but merely here so that we can report # accuracy figures as we train. train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax( tf.matmul(tf_valid_dataset, weights) + biases) test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases) ``` Let's run this computation and iterate: ``` num_steps = 801 def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]) with tf.Session(graph=graph) as session: # This is a one-time operation which ensures the parameters get initialized as # we described in the graph: random weights for the matrix, zeros for the # biases. tf.global_variables_initializer().run() print('Initialized') for step in range(num_steps): # Run the computations. We tell .run() that we want to run the optimizer, # and get the loss value and the training predictions returned as numpy # arrays. 
_, l, predictions = session.run([optimizer, loss, train_prediction]) if (step % 100 == 0): print('Loss at step %d: %f' % (step, l)) print('Training accuracy: %.1f%%' % accuracy( predictions, train_labels[:train_subset, :])) # Calling .eval() on valid_prediction is basically like calling run(), but # just to get that one numpy array. Note that it recomputes all its graph # dependencies. print('Validation accuracy: %.1f%%' % accuracy( valid_prediction.eval(), valid_labels)) print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels)) ``` Let's now switch to stochastic gradient descent training instead, which is much faster. The graph will be similar, except that instead of holding all the training data into a constant node, we create a `Placeholder` node which will be fed actual data at every call of `session.run()`. ``` tf.reset_default_graph() batch_size = 128 graph = tf.Graph() with graph.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. weights = tf.Variable( tf.truncated_normal([image_size * image_size, num_labels])) biases = tf.Variable(tf.zeros([num_labels])) # Training computation. logits = tf.matmul(tf_train_dataset, weights) + biases loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits)) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. 
train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax( tf.matmul(tf_valid_dataset, weights) + biases) test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases) ``` Let's run it: ``` num_steps = 3001 with tf.Session(graph=graph) as session: writer = tf.summary.FileWriter('graphs/fullyconnected', session.graph) tf.global_variables_initializer().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run( [optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) writer.close() ``` --- Problem ------- Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units [nn.relu()](https://www.tensorflow.org/versions/r0.7/api_docs/python/nn.html#relu) and 1024 hidden nodes. This model should improve your validation / test accuracy. --- ``` tf.reset_default_graph() batch_size = 128 hidden = 2000 num_steps = 3001 # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. 
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size), name='train_dataset') tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels), name='train_labels') tf_valid_dataset = tf.constant(valid_dataset, name='validation_dataset') tf_test_dataset = tf.constant(test_dataset, name='test_dataset') # Variables from input to hidden layer. # Take hidden as the number of nodes in the hidden layer. weights1 = tf.get_variable('weights1', initializer=tf.truncated_normal([image_size * image_size, hidden])) biases1 = tf.get_variable('biases1', initializer=tf.zeros([hidden])) # Hidden layer = ReLU hidden1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1) # Variables from hidden layer to output. weights2s = tf.nn.relu(weights1, name='relu_weights') biases2s = tf.nn.relu(biases1, name='relu_biases') # Take average weights2 = tf.get_variable('weights2', initializer=tf.truncated_normal([hidden, num_labels])) biases2 = tf.get_variable('biases2', initializer=tf.zeros([num_labels])) # Training computation. logits = tf.matmul(hidden1, weights2) + biases2 loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits), name='loss') # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu((tf.matmul(tf_valid_dataset, weights1) + biases1)), weights2) + biases2) test_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu((tf.matmul(tf_test_dataset, weights1) + biases1)), weights2) + biases2) with tf.Session() as session: writer = tf.summary.FileWriter('graphs/fullyconnected', session.graph) tf.global_variables_initializer().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. 
offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, pred = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(pred, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) writer.close() ```
github_jupyter
``` #IMPORT SEMUA LIBARARY #IMPORT LIBRARY PANDAS import pandas as pd #IMPORT LIBRARY UNTUK POSTGRE from sqlalchemy import create_engine import psycopg2 #IMPORT LIBRARY CHART from matplotlib import pyplot as plt from matplotlib import style #IMPORT LIBRARY BASE PATH import os import io #IMPORT LIBARARY PDF from fpdf import FPDF #IMPORT LIBARARY CHART KE BASE64 import base64 #IMPORT LIBARARY EXCEL import xlsxwriter #FUNGSI UNTUK MENGUPLOAD DATA DARI CSV KE POSTGRESQL def uploadToPSQL(columns, table, filePath, engine): #FUNGSI UNTUK MEMBACA CSV df = pd.read_csv( os.path.abspath(filePath), names=columns, keep_default_na=False ) #APABILA ADA FIELD KOSONG DISINI DIFILTER df.fillna('') #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN del df['kategori'] del df['jenis'] del df['pengiriman'] del df['satuan'] #MEMINDAHKAN DATA DARI CSV KE POSTGRESQL df.to_sql( table, engine, if_exists='replace' ) #DIHITUNG APABILA DATA YANG DIUPLOAD BERHASIL, MAKA AKAN MENGEMBALIKAN KELUARAN TRUE(BENAR) DAN SEBALIKNYA if len(df) == 0: return False else: return True #FUNGSI UNTUK MEMBUAT CHART, DATA YANG DIAMBIL DARI DATABASE DENGAN MENGGUNAKAN ORDER DARI TANGGAL DAN JUGA LIMIT #DISINI JUGA MEMANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF def makeChart(host, username, password, db, port, table, judul, columns, filePath, name, subjudul, limit, negara, basePath): #TEST KONEKSI DATABASE try: #KONEKSI KE DATABASE connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db) cursor = connection.cursor() #MENGAMBL DATA DARI TABLE YANG DIDEFINISIKAN DIBAWAH, DAN DIORDER DARI TANGGAL TERAKHIR #BISA DITAMBAHKAN LIMIT SUPAYA DATA YANG DIAMBIL TIDAK TERLALU BANYAK DAN BERAT postgreSQL_select_Query = "SELECT * FROM "+table+" ORDER BY tanggal ASC LIMIT " + str(limit) cursor.execute(postgreSQL_select_Query) mobile_records = cursor.fetchall() uid = [] lengthx = [] lengthy = [] #MELAKUKAN LOOPING ATAU PERULANGAN DARI DATA YANG SUDAH DIAMBIL #KEMUDIAN DATA TERSEBUT DITEMPELKAN KE VARIABLE DIATAS 
INI for row in mobile_records: uid.append(row[0]) lengthx.append(row[1]) if row[2] == "": lengthy.append(float(0)) else: lengthy.append(float(row[2])) #FUNGSI UNTUK MEMBUAT CHART #bar style.use('ggplot') fig, ax = plt.subplots() #MASUKAN DATA ID DARI DATABASE, DAN JUGA DATA TANGGAL ax.bar(uid, lengthy, align='center') #UNTUK JUDUL CHARTNYA ax.set_title(judul) ax.set_ylabel('Total') ax.set_xlabel('Tanggal') ax.set_xticks(uid) #TOTAL DATA YANG DIAMBIL DARI DATABASE, DIMASUKAN DISINI ax.set_xticklabels((lengthx)) b = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(b, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #line #MASUKAN DATA DARI DATABASE plt.plot(lengthx, lengthy) plt.xlabel('Tanggal') plt.ylabel('Total') #UNTUK JUDUL CHARTNYA plt.title(judul) plt.grid(True) l = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(l, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #pie #UNTUK JUDUL CHARTNYA plt.title(judul) #MASUKAN DATA DARI DATABASE plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%', shadow=True, startangle=180) plt.axis('equal') p = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(p, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #MENGAMBIL DATA DARI CSV YANG DIGUNAKAN SEBAGAI HEADER DARI TABLE UNTUK EXCEL DAN JUGA PDF header = pd.read_csv( os.path.abspath(filePath), names=columns, keep_default_na=False ) #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN header.fillna('') del header['tanggal'] del header['total'] #MEMANGGIL FUNGSI EXCEL makeExcel(mobile_records, header, name, limit, basePath) 
#MEMANGGIL FUNGSI PDF makePDF(mobile_records, header, judul, barChart, lineChart, pieChart, name, subjudul, limit, basePath) #JIKA GAGAL KONEKSI KE DATABASE, MASUK KESINI UNTUK MENAMPILKAN ERRORNYA except (Exception, psycopg2.Error) as error : print (error) #KONEKSI DITUTUP finally: if(connection): cursor.close() connection.close() #FUNGSI MAKEEXCEL GUNANYA UNTUK MEMBUAT DATA YANG BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2 #PLUGIN YANG DIGUNAKAN ADALAH XLSXWRITER def makeExcel(datarow, dataheader, name, limit, basePath): #MEMBUAT FILE EXCEL workbook = xlsxwriter.Workbook(basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/excel/'+name+'.xlsx') #MENAMBAHKAN WORKSHEET PADA FILE EXCEL TERSEBUT worksheet = workbook.add_worksheet('sheet1') #SETINGAN AGAR DIBERIKAN BORDER DAN FONT MENJADI BOLD row1 = workbook.add_format({'border': 2, 'bold': 1}) row2 = workbook.add_format({'border': 2}) #MENJADIKAN DATA MENJADI ARRAY data=list(datarow) isihead=list(dataheader.values) header = [] body = [] #LOOPING ATAU PERULANGAN, KEMUDIAN DATA DITAMPUNG PADA VARIABLE DIATAS for rowhead in dataheader: header.append(str(rowhead)) for rowhead2 in datarow: header.append(str(rowhead2[1])) for rowbody in isihead[1]: body.append(str(rowbody)) for rowbody2 in data: body.append(str(rowbody2[2])) #MEMASUKAN DATA DARI VARIABLE DIATAS KE DALAM COLUMN DAN ROW EXCEL for col_num, data in enumerate(header): worksheet.write(0, col_num, data, row1) for col_num, data in enumerate(body): worksheet.write(1, col_num, data, row2) #FILE EXCEL DITUTUP workbook.close() #FUNGSI UNTUK MEMBUAT PDF YANG DATANYA BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2 #PLUGIN YANG DIGUNAKAN ADALAH FPDF def makePDF(datarow, dataheader, judul, bar, line, pie, name, subjudul, lengthPDF, basePath): #FUNGSI UNTUK MENGATUR UKURAN KERTAS, DISINI MENGGUNAKAN UKURAN A4 DENGAN POSISI LANDSCAPE pdf = FPDF('L', 'mm', [210,297]) #MENAMBAHKAN HALAMAN PADA PDF pdf.add_page() #PENGATURAN UNTUK JARAK PADDING DAN JUGA UKURAN FONT 
pdf.set_font('helvetica', 'B', 20.0) pdf.set_xy(145.0, 15.0) #MEMASUKAN JUDUL KE DALAM PDF pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('arial', '', 14.0) pdf.set_xy(145.0, 25.0) #MEMASUKAN SUB JUDUL KE PDF pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0) #MEMBUAT GARIS DI BAWAH SUB JUDUL pdf.line(10.0, 30.0, 287.0, 30.0) pdf.set_font('times', '', 10.0) pdf.set_xy(17.0, 37.0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('Times','',10.0) #MENGAMBIL DATA HEADER PDF YANG SEBELUMNYA SUDAH DIDEFINISIKAN DIATAS datahead=list(dataheader.values) pdf.set_font('Times','B',12.0) pdf.ln(0.5) th1 = pdf.font_size #MEMBUAT TABLE PADA PDF, DAN MENAMPILKAN DATA DARI VARIABLE YANG SUDAH DIKIRIM pdf.cell(100, 2*th1, "Kategori", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][0], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Jenis", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][1], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Pengiriman", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][2], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Satuan", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][3], border=1, align='C') pdf.ln(2*th1) #PENGATURAN PADDING pdf.set_xy(17.0, 75.0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('Times','B',11.0) data=list(datarow) epw = pdf.w - 2*pdf.l_margin col_width = epw/(lengthPDF+1) #PENGATURAN UNTUK JARAK PADDING pdf.ln(0.5) th = pdf.font_size #MEMASUKAN DATA HEADER YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF pdf.cell(50, 2*th, str("Negara"), border=1, align='C') for row in data: pdf.cell(40, 2*th, str(row[1]), border=1, align='C') pdf.ln(2*th) #MEMASUKAN DATA ISI YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF pdf.set_font('Times','B',10.0) pdf.set_font('Arial','',9) pdf.cell(50, 2*th, negara, border=1, align='C') for row in data: pdf.cell(40, 2*th, 
str(row[2]), border=1, align='C') pdf.ln(2*th) #MENGAMBIL DATA CHART, KEMUDIAN CHART TERSEBUT DIJADIKAN PNG DAN DISIMPAN PADA DIRECTORY DIBAWAH INI #BAR CHART bardata = base64.b64decode(bar) barname = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-bar.png' with open(barname, 'wb') as f: f.write(bardata) #LINE CHART linedata = base64.b64decode(line) linename = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-line.png' with open(linename, 'wb') as f: f.write(linedata) #PIE CHART piedata = base64.b64decode(pie) piename = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-pie.png' with open(piename, 'wb') as f: f.write(piedata) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin widthcol = col/3 #MEMANGGIL DATA GAMBAR DARI DIREKTORY DIATAS pdf.image(barname, link='', type='',x=8, y=100, w=widthcol) pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin pdf.image(linename, link='', type='',x=103, y=100, w=widthcol) pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin pdf.image(piename, link='', type='',x=195, y=100, w=widthcol) pdf.ln(2*th) #MEMBUAT FILE PDF pdf.output(basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/pdf/'+name+'.pdf', 'F') #DISINI TEMPAT AWAL UNTUK MENDEFINISIKAN VARIABEL VARIABEL SEBELUM NANTINYA DIKIRIM KE FUNGSI #PERTAMA MANGGIL FUNGSI UPLOADTOPSQL DULU, KALAU SUKSES BARU MANGGIL FUNGSI MAKECHART #DAN DI MAKECHART MANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF #DEFINISIKAN COLUMN BERDASARKAN FIELD CSV columns = [ "kategori", "jenis", "tanggal", "total", "pengiriman", "satuan", ] #UNTUK NAMA FILE name = "SektorHargaInflasi3_4" #VARIABLE UNTUK KONEKSI KE DATABASE host = "localhost" username = "postgres" password = "1234567890" port = "5432" database = "bloomberg_SektorHargaInflasi" table = name.lower() #JUDUL PADA PDF DAN EXCEL judul = "Data Sektor Harga Inflasi" subjudul = "Badan Perencanaan Pembangunan Nasional" #LIMIT DATA UNTUK SELECT DI DATABASE limitdata = int(8) #NAMA NEGARA 
UNTUK DITAMPILKAN DI EXCEL DAN PDF negara = "Indonesia" #BASE PATH DIRECTORY basePath = 'C:/Users/ASUS/Documents/bappenas/' #FILE CSV filePath = basePath+ 'data mentah/BLOOMBERG/SektorHargaInflasi/' +name+'.csv'; #KONEKSI KE DATABASE engine = create_engine('postgresql://'+username+':'+password+'@'+host+':'+port+'/'+database) #MEMANGGIL FUNGSI UPLOAD TO PSQL checkUpload = uploadToPSQL(columns, table, filePath, engine) #MENGECEK FUNGSI DARI UPLOAD PSQL, JIKA BERHASIL LANJUT MEMBUAT FUNGSI CHART, JIKA GAGAL AKAN MENAMPILKAN PESAN ERROR if checkUpload == True: makeChart(host, username, password, database, port, table, judul, columns, filePath, name, subjudul, limitdata, negara, basePath) else: print("Error When Upload CSV") ```
github_jupyter
``` import arviz as az import matplotlib.pyplot as plt import numpy as np from scipy import stats az.style.use("arviz-white") ``` # Metropolis sampling for the concussions data Let $Y_i$ be the number of concussions (aggregated over all teams and games) in season i (1=2012,…,4=2015). We model these counts as: $$Y_i\sim\mbox{Poisson}(N\lambda_i) \mbox{ where } \lambda_i=\exp(\beta_1+i\beta_2)$$ where $N$ is the number of games played per year and $\lambda_i$ is the rate in year $i$. To complete the Bayesian model, we specify uninformative priors $\beta_1,\beta_2\sim\mbox{Normal}(0,\tau^2)$. The log of the mean concussion rate is linear in time with $\beta_2$ determining the slope. The objective is to determine if the concussion rate is increasing, i.e., $\beta_2>0$. ## Initialize ``` Y = np.array([171, 152, 123, 199]) t = np.arange(4) n = 4 N = 256 ``` ## Compute the posterior on a grid for reference ``` # Create an empty matrix for the MCMC samples S = 25000 samples = np.zeros((S, 2)) fitted = np.zeros((S, 4)) # Initial values beta = np.array([np.log(np.mean(Y / N)), 0]) # priors: beta[j] ~ N(0,tau^2) tau = 10 sd = (1 / tau) ** 0.5 # candidate standard deviations can_sd = [0.1, 0.1] ``` ## Define the log posterior as a function ``` def log_post(Y, N, t, beta, sd): mn = N * np.exp(beta[0] + beta[1] * t) like = stats.poisson(mn).logpmf(Y).sum() prior = stats.norm(0, sd).logpdf(beta).sum() post = like + prior return post ``` ## Metropolis sampling ``` for s in range(S): for j in range(2): can = np.copy(beta) can[j] = stats.norm(beta[j], can_sd[j]).rvs(1) logR = log_post(Y, N, t, can, sd) - log_post(Y, N, t, beta, sd) if np.log(stats.uniform(0, 1).rvs(1)) < logR: beta = can samples[s:] = beta fitted[s:] = N * np.exp(beta[0] + beta[1] * t) sampler_dict = {"β1": samples[:, 0], "β2": samples[:, 1]} ``` ## Compute the acceptance rates and plot the samples ``` # Acceptance rates np.mean(np.diff(samples, axis=0) > 0, 0) az.plot_trace(sampler_dict); ``` ## Summarize the 
fitted values for each year The boxplots are the posterior distribution of the $N\lambda_i=N\exp(\beta_1+i\beta_2)$, and the points are the observed counts. The linear trend doesn't fit particularly well. ``` plt.boxplot(fitted[:, :4], labels=[y for y in range(2012, 2016)]) plt.ylabel("λ"); # Posterior probability that the slope is positive np.mean(samples[:, 1] > 0) ``` There is some evidence that the rate is increasing, but it seems to be driven only by the last year.
github_jupyter
# MXNet with DALI - ResNet 50 example ## Overview This example shows, how to use DALI pipelines with Apache MXNet. ## ResNet 50 pipeline Let us first define a few global constants. ``` from __future__ import print_function from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types N = 8 # number of GPUs batch_size = 128 # batch size per GPU db_folder = "/data/imagenet/train-480-val-256-recordio/" ``` ### The training pipeline The training pipeline consists of the following steps: * Data is first read from MXNet's recordIO file (the reader op is given a name `Reader` for later use) * Then, images are decoded using nvJPEG * RGB images are then randomly cropped and resized to the final size of (224, 224) pixels * Finally, the batch is transposed from NHWC layout to NCHW layout, normalized and randomly mirrored. `DALIClassificationIterator`, which we will use for interfacing with MXNet in this example, requires outputs of the pipeline to follow (image, label) structure. 
``` class HybridTrainPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id) self.input = ops.MXNetReader(path = [db_folder+"train.rec"], index_path=[db_folder+"train.idx"], random_shuffle = True, shard_id = device_id, num_shards = num_gpus) self.decode = ops.nvJPEGDecoderRandomCrop(device = "mixed", output_type = types.RGB, random_aspect_ratio = [0.8, 1.25], random_area = [0.1, 1.0], num_attempts = 100) self.resize = ops.Resize(device = "gpu", resize_x = 224, resize_y = 224) self.cmnp = ops.CropMirrorNormalize(device = "gpu", output_dtype = types.FLOAT, output_layout = types.NCHW, crop = (224, 224), image_type = types.RGB, mean = [0.485 * 255,0.456 * 255,0.406 * 255], std = [0.229 * 255,0.224 * 255,0.225 * 255]) self.coin = ops.CoinFlip(probability = 0.5) def define_graph(self): rng = self.coin() self.jpegs, self.labels = self.input(name = "Reader") images = self.decode(self.jpegs) images = self.resize(images) output = self.cmnp(images, mirror = rng) return [output, self.labels] ``` ### The validation pipeline The validation pipeline is similar to the training pipeline, but omits the random resized crop and random mirroring steps, as well as shuffling the data coming from the reader. 
``` class HybridValPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id) self.input = ops.MXNetReader(path = [db_folder+"val.rec"], index_path=[db_folder+"val.idx"], random_shuffle = False, shard_id = device_id, num_shards = num_gpus) self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB) self.cmnp = ops.CropMirrorNormalize(device = "gpu", output_dtype = types.FLOAT, output_layout = types.NCHW, crop = (224, 224), image_type = types.RGB, mean = [0.485 * 255,0.456 * 255,0.406 * 255], std = [0.229 * 255,0.224 * 255,0.225 * 255]) def define_graph(self): self.jpegs, self.labels = self.input(name = "Reader") images = self.decode(self.jpegs) output = self.cmnp(images) return [output, self.labels] trainpipes = [HybridTrainPipe(batch_size=batch_size, num_threads=2, device_id = i, num_gpus = N) for i in range(N)] valpipes = [HybridValPipe(batch_size=batch_size, num_threads=2, device_id = i, num_gpus = N) for i in range(N)] ``` ### Using the MXNet plugin MXNet data iterators need to know what is the size of the dataset. Since DALI pipelines may consist of multiple readers, potentially with differently sized datasets, we need to specify the reader which we ask for the epoch size. That is why we gave a name to readers in both training and validation pipelines. In order to get the epoch size out of the reader, we need to build one of the training and one of the validation pipelines. ``` trainpipes[0].build() valpipes[0].build() print("Training pipeline epoch size: {}".format(trainpipes[0].epoch_size("Reader"))) print("Validation pipeline epoch size: {}".format(valpipes[0].epoch_size("Reader"))) ``` Now we can make MXNet iterators out of our pipelines, using `DALIClassificationIterator` class. 
``` from nvidia.dali.plugin.mxnet import DALIClassificationIterator dali_train_iter = DALIClassificationIterator(trainpipes, trainpipes[0].epoch_size("Reader")) dali_val_iter = DALIClassificationIterator(valpipes, valpipes[0].epoch_size("Reader")) ``` ## Training with MXNet Once we have MXNet data iterators from `DALIClassificationIterator`, we can use them instead of MXNet's`mx.io.ImageRecordIter`. Here we show modified `train_imagenet.py` example that uses our DALI pipelines. ``` import os import argparse import logging logging.basicConfig(level=logging.DEBUG) from demo.common import find_mxnet, data, fit import mxnet as mx gpus_string = "".join(str(list(range(N)))).replace('[','').replace(']','') s = ['--gpu', gpus_string, '--batch-size', str(batch_size * N), '--num-epochs', '1', '--data-train', '/data/imagenet/train-480-val-256-recordio/train.rec', '--data-val', '/data/imagenet/train-480-val-256-recordio/val.rec', '--disp-batches', '100', '--network', 'resnet-v1', '--num-layers', '50', '--data-nthreads', '40', '--min-random-scale', '0.533', '--max-random-shear-ratio', '0', '--max-random-rotate-angle', '0', '--max-random-h', '0', '--max-random-l', '0', '--max-random-s', '0', '--dtype', 'float16'] # parse args parser = argparse.ArgumentParser(description="train imagenet-1k", formatter_class=argparse.ArgumentDefaultsHelpFormatter) fit.add_fit_args(parser) data.add_data_args(parser) data.add_data_aug_args(parser) # use a large aug level data.set_data_aug_level(parser, 3) parser.set_defaults( # network network = 'resnet', num_layers = 50, # data num_classes = 1000, num_examples = 1281167, image_shape = '3,224,224', min_random_scale = 1, # if input image has min size k, suggest to use # 256.0/x, e.g. 
0.533 for 480 # train num_epochs = 80, lr_step_epochs = '30,60', dtype = 'float32' ) args = parser.parse_args(s) # load network from importlib import import_module net = import_module('demo.symbols.'+args.network) sym = net.get_symbol(1000, 50, "3,224,224", dtype='float16') def get_dali_iter(args, kv=None): return (dali_train_iter, dali_val_iter) # train #fit.fit(args, sym, data.get_rec_iter) fit.fit(args, sym, get_dali_iter) ```
github_jupyter
### Classify CIFAR10 images using CNN
pyplot.subplot(330 + 1 + i) # generate batch of images batch = it.next() # convert to unsigned integers for viewing image = batch[0].astype('uint8') # plot raw pixel data pyplot.savefig('test' + str(i) + '.jpg') pyplot.imshow(image) # show the figure pyplot.show() # create flipped versions of an image from PIL import Image from matplotlib import pyplot # load image image = Image.open('rotate.jpg') #image = train_image[4] # horizontal flip hoz_flip = image.transpose(Image.FLIP_LEFT_RIGHT).save('h_test.jpg') # vertical flip #ver_flip = image.transpose(Image.FLIP_TOP_BOTTOM) # plot all three images using matplotlib #pyplot.subplot(311) #pyplot.imshow(image) #pyplot.subplot(312) #pyplot.imshow(hoz_flip) #pyplot.subplot(313) #pyplot.imshow(ver_flip) #pyplot.show() train_image[0] x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) from matplotlib import pyplot from scipy.misc import toimage def show_imgs(X): pyplot.figure(1) k = 0 for i in range(0,4): for j in range(0,4): pyplot.subplot2grid((4,4),(i,j)) pyplot.imshow(toimage(X[k])) k = k+1 # show the plot pyplot.show() show_imgs(x_test[:16]) x_train=x_train/255 x_test = x_test/255 n_class=6 y_train = keras.utils.to_categorical(y_train,n_class) y_test = keras.utils.to_categorical(y_test,n_class) n_conv = 64 k_conv = (3,3) y_train.shape weight_decay = 1e-4 model = Sequential() model.add(Conv2D(32,(3,3),padding='same', activation='relu',kernel_regularizer=regularizers.l2(weight_decay))) model.add(MaxPooling2D()) model.add(Dropout(0.5)) model.add(Conv2D(64,(3,3),padding='same', activation='relu',kernel_regularizer=regularizers.l2(weight_decay))) model.add(MaxPooling2D()) model.add(BatchNormalization()) model.add(Dropout(0.2)) model.add(Conv2D(128,(3,3), padding='same',activation='relu',kernel_regularizer=regularizers.l2(weight_decay))) model.add(MaxPooling2D()) model.add(BatchNormalization()) 
model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(128,activation='relu')) model.add(Dense(6,activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(x_train,y_train,epochs=5,verbose=1, validation_data=(x_test,y_test)) test = pd.read_csv('test_ApKoW4T.csv') test_image = [] for i in tqdm(range(test.shape[0])): img = image.load_img('images/'+test['image'][i], target_size=(28,28,3), grayscale=False) img = image.img_to_array(img) img = img/255 test_image.append(img) test = np.array(test_image) prediction = model.predict_classes(test) pd.DataFrame(prediction).to_csv('test_pred.csv') model.save('trained_epoch_5.h5') filepath='trained_epoch_5.h5' from keras.callbacks import ModelCheckpoint from keras.models import load_model new_model = load_model('trained_epoch_5.h5') checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callback_list = [checkpoint] new_model.fit(x_train,y_train, epochs=20, validation_data=(x_test,y_test),callbacks = callback_list) new_model.save('trained_epoch_25.h5') prediction = new_model.predict_classes(test) pd.DataFrame(prediction).to_csv('test_pred.csv') filepath='trained_epoch_25.h5' new_model1 = load_model('trained_epoch_25.h5') checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callback_list = [checkpoint] new_model1.fit(x_train,y_train, epochs=20, validation_data=(x_test,y_test),callbacks = callback_list) prediction=new_model1.predict_classes(test) pd.DataFrame(prediction).to_csv('test_pred.csv') ```
github_jupyter
# Linear Models and OLS *Curtis Miller* **Regression** refers to the prediction of a continuous variable (income, age, height, etc.) using a dataset's features. A **linear model** is a model of the form: $$y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_K x_K + \epsilon$$ Here $\epsilon$ is an **error term**; the predicted value for $y$ is given by $\hat{y} = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_K x_K$, so $y - \hat{y} = \epsilon$. $\epsilon$ is almost never zero, so for regression we must measure "accuracy" differently. The **sum of squared errors (SSE)** is the sum $\sum_{i = 1}^n (y_i - \hat{y}_i)^2$ (letting $y_i = \beta_0 + \beta_1 x_{1,i} + \beta_2 x_{2,i} + ... + \beta_K x_{K,i} + \epsilon_i$ and $\hat{y}_i$ defined analogously). We might define the "most accurate" regression model as the model that minimizes the SSE. However, when measuring performance, the **mean squared error (MSE)** is often used. The MSE is given by $\frac{\text{SSE}}{n} = \frac{1}{n}\sum_{i = 1}^{n} (y_i - \hat{y}_i)^2$. **Ordinary least squares (OLS)** is a procedure for finding a linear model that minimizes the SSE on a dataset. This is the simplest procedure for fitting a linear model on a dataset. To evaluate the model's performance we may split a dataset into training and test set, and evaluate the trained model's performance by computing the MSE of the model's predictions on the test set. If the model has a high MSE on both the training and test set, it's underfitting. If it has a small MSE on the training set and a high MSE on the test set, it is overfitting. With OLS the most important decision is which features to use in prediction and how to use them. "Linear" means linear in coefficients only; these models can handle many kinds of functions. (The models $\hat{y} = \beta_0 + \beta_1 x + \beta_2 x^2$ and $\hat{y} = \beta_0 + \beta_1 \log(x)$ are linear, but $\hat{y} = \frac{\beta_0}{1 + \beta_1 x}$ is not.) 
Many approaches exist for deciding which features to include. For now we will only use cross-validation. ## Fitting a Linear Model with OLS OLS is supported by the `LinearRegression` object in **scikit-learn**, while the function `mean_squared_error()` computes the MSE. I will be using OLS to find a linear model for predicting home prices in the Boston house price dataset, created below. ``` from sklearn.datasets import load_boston from sklearn.cross_validation import train_test_split boston_obj = load_boston() data, price = boston_obj.data, boston_obj.target data[:5, :] price[:5] data_train, data_test, price_train, price_test = train_test_split(data, price) data_train[:5, :] price_train[:5] ``` We will go ahead and use all features for prediction in our first linear model. (In general this does *not* necessarily produce better models; some features may introduce only noise that makes prediction *more* difficult, not less.) ``` from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error import numpy as np ols1 = LinearRegression() ols1.fit(data_train, price_train) # Fitting a linear model ols1.predict([[ # An example prediction 1, # Per capita crime rate 25, # Proportion of land zoned for large homes 5, # Proportion of land zoned for non-retail business 1, # Tract bounds the Charles River 0.3, # NOX concentration 10, # Average number of rooms per dwelling 2, # Proportion of owner-occupied units built prior to 1940 10, # Weighted distance to employment centers 3, # Index for highway accessibility 400, # Tax rate 15, # Pupil/teacher ratio 200, # Index for number of blacks 5 # % lower status of population ]]) predprice = ols1.predict(data_train) predprice[:5] mean_squared_error(price_train, predprice) np.sqrt(mean_squared_error(price_train, predprice)) ``` The square root of the mean squared error can be interpreted as the average amount of error; in this case, the average difference between homes' actual and predicted prices. 
(This is almost the standard deviation of the error.) For cross-validation, I will use `cross_val_score()`, which performs the entire cross-validation process. ``` from sklearn.model_selection import cross_val_score ols2 = LinearRegression() ols_cv_mse = cross_val_score(ols2, data_train, price_train, scoring='neg_mean_squared_error', cv=10) ols_cv_mse.mean() ``` The above number is the negative average MSE for cross-validation (minimizing MSE is equivalent to maximizing the negative MSE). This is close to our in-sample MSE. Let's now see the MSE for the fitted model on the test set. ``` testpredprice = ols1.predict(data_test) mean_squared_error(price_test, testpredprice) np.sqrt(mean_squared_error(price_test, testpredprice)) ``` Overfitting is minimal, it seems.
github_jupyter
In this lab, we will optimize the weather simulation application written in Fortran (if you prefer to use C++, click [this link](../../C/jupyter_notebook/profiling-c.ipynb)). Let's execute the cell below to display information about the GPUs running on the server by running the pgaccelinfo command, which ships with the PGI compiler that we will be using. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell. ``` !pgaccelinfo ``` ## Exercise 3 ### Learning objectives Learn how to improve GPU occupancy and extract more parallelism by adding more descriptive clauses to the OpenACC loop constructs in the application. In this exercise you will: - Learn about GPU occupancy, and OpenACC vs CUDA execution model - Learn how to find out GPU occupancy from the Nsight Systems profiler - Learn how to improve the occupancy and saturate compute resources - Learn about collapse clause for further optimization of the parallel nested loops and when to use them - Apply collapse clause to eligible nested loops in the application and investigate the profiler report Look at the profiler report from the previous exercise again. From the timeline, have a close look at the the kernel functions. We can see that the for example `compute_tendencies_z_383_gpu` kernel has the theoretical occupancy of 37.5% . It clearly shows that the occupancy is a limiting factor. *Occupancy* is a measure of how well the GPU compute resources are being utilized. It is about how much parallelism is running / how much parallelism the hardware could run. <img src="images/occu-2.png" width="30%" height="30%"> NVIDIA GPUs are comprised of multiple streaming multiprocessors (SMs) where it can manage up to 2048 concurrent threads (not actively running at the same time). 
Low occupancy shows that there are not enough active threads to fully utilize the computing resources. Higher occupancy implies that the scheduler has more active threads to choose from and hence achieves higher performance. So, what does this mean in OpenACC execution model? **Gang, Worker, and Vector** CUDA and OpenACC programming model use different terminologies for similar ideas. For example, in CUDA, parallel execution is organized into grids, blocks, and threads. On the other hand, the OpenACC execution model has three levels of gang, worker, and vector. OpenACC assumes the device has multiple processing elements (Streaming Multiprocessors on NVIDIA GPUs) running in parallel and mapping of OpenACC execution model on CUDA is as below: - An OpenACC gang is a threadblock - A worker is a warp - An OpenACC vector is a CUDA thread <img src="images/diagram.png" width="50%" height="50%"> So, in order to improve the occupancy, we have to increase the parallelism within the gang. In other words, we have to increase the number of threads that can be scheduled on the GPU to improve GPU thread occupancy. **Optimizing loops and improving occupancy** Let's have a look at the compiler feedback (*Line 315*) and the corresponding code snippet showing three tightly nested loops. <img src="images/ffeedback1-1.png" width="90%" height="90%"> The iteration count for the outer loop is `NUM_VARS` which is 4. As you can see from the above screenshot, the block dimention is <4,1,1> which shows the small amount of parallelism within the gang. ```fortran !$acc parallel loop do ll = 1 , NUM_VARS do k = 1 , nz do i = 1 , nx tend(i,k,ll) = -( flux(i+1,k,ll) - flux(i,k,ll) ) / dx enddo enddo enddo ``` In order to expose more parallelism and improve the occupancy, we can use an additional clause called `collapse` in the `!$acc parallel loop` to optimize loops. The loop directive gives the compiler additional information about the next loop in the source code through several clauses. 
Apply the `collapse(N)` clause to a loop directive to collapse the next `N` tightly-nested loops into a single, flattened loop.
<img src="images/occu-3.png" width="40%" height="40%"> As you can see from the above screenshot, the theoretical occupancy is now 75% and the block dimension is now `<128,1,1>` where *128* is the vector size per gang. **Screenshots represents profiler report for the values of 400,200,1500.** ```fortran !$acc parallel loop collapse(3) do ll = 1 , NUM_VARS do k = 1 , nz do i = 1 , nx tend(i,k,ll) = -( flux(i+1,k,ll) - flux(i,k,ll) ) / dx enddo enddo enddo ``` The iteration count for the collapsed loop is `NUM_VARS * nz * nx` where (in the example screenshot), - nz= 200, - nx = 400, and - NUM_VARS = 4 So, the interaction count for this particular loop inside the `compute_tendencies_z_383_gpu` function is 320K. This number divided by the vector length of *128* would gives us the grid dimension of `<2500,1,1>`. By creating a single iteration space across the nested loops and increasing the iteration count, we improved the occupancy and extracted more parallelism. **Notes:** - 100% occupancy is not required for, nor does it guarantee best performance. - Less than 50% occupancy is often a red flag How much this optimization will speed-up the code will vary according to the application and the target accelerator, but it is not uncommon to see large speed-ups by using collapse on loop nests. ## Post-Lab Summary If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. ``` %%bash cd .. rm -f openacc_profiler_files.zip zip -r openacc_profiler_files.zip * ``` **After** executing the above zip command, you should be able to download the zip file [here](../openacc_profiler_files.zip). 
----- # <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../profiling_start.ipynb>HOME</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<span style="float:center"> <a href=profiling-fortran-lab4.ipynb>NEXT</a></span> </p> ----- # Links and Resources [OpenACC API Guide](https://www.openacc.org/sites/default/files/inline-files/OpenACC%20API%202.6%20Reference%20Guide.pdf) [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/) [CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads) **NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems). Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. --- ## Licensing This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
github_jupyter
--- # App --- ``` import safenet safenet.setup_logger(file_level=safenet.log_util.WARNING) myApp = safenet.App() grantedAuth = 'bAEAAAAA22X6O4AAAAAAAAAAAAAQAAAAAAAAAAACGXDI7FJQJGFWTNW3C6KKWWNKBOOU4OVE3T23X7XQZHHMYK6SFOMQAAAAAAAAAAAAGJW4ZRRI6ZMOUYL6TVN4JXT4UXSX7LLJ2FLPFKJNXWY2QXGGTKIQAAAAAAAAAAAF3ZA2HS52IHNPYCNPHSRSHEFIU3ANOS56YMQTVLFPCBJYASAQLXNAAAAAAAAAAAAGQLE3OAQLBGFLOZXTKEFHPKSNYBYPTXEVOBSUP6DR7T5T6C5BDRO54QNDZO5EDWX4BGXTZIZDSCUKNQGXJO7MGIJ2VSXRAU4AJAIF3WIAAAAAAAAAAAAAHROX26R63EZUQBWCKZQMAH6UTUKGBD4BL7XPFPUK2PZT4PO6QOIAAAAAAAAAAAD2X7DZB3BEGJXXQJYX5GG5W3IW4WLWNKFF5EQYSG7S3V4VSVVPYEGIAAAAAAAAAAAIAAAAAAAAAAABRG44C4NRSFY3TMLRYHI2TIOBTCMAAAAAAAAAAAMJTHAXDMOBOGE4DKLRSGE4DUNJUHAZREAAAAAAAAAAAGEZTQLRWHAXDCOBRFY2TOORVGQ4DGEQAAAAAAAAAAAYTGOBOGY4C4MJYGEXDMMB2GU2DQMYSAAAAAAAAAAADCMZYFY3DQLRRHAYS4OBWHI2TIOBTCIAAAAAAAAAAAMJTHAXDMOBOGE4DCLRYG45DKNBYGMJQAAAAAAAAAABRGM4C4NRYFYYTQMJOGE3DQORVGQ4DGEYAAAAAAAAAAAYTGOBOGY4C4MJYGEXDCNZWHI2TIOBTCMAAAAAAAAAAAMJTHAXDMOBOGE4DCLRRG44TUNJUHAZRGAAAAAAAAAAAGEZTQLRWHAXDCOBRFYYTQMB2GU2DQMYTAAAAAAAAAAADCMZYFY3DQLRRHAYS4MJYGI5DKNBYGMJQAAAAAAAAAABRGM4C4NRYFYYTQMJOGI2DEORVGQ4DGEYAAAAAAAAAAAYTGOBOGY4C4MJYGEXDENBTHI2TIOBTCMAAAAAAAAAAAMJTHAXDMOBOGE4DCLRSGQ4TUNJUHAZREAAAAAAAAAAAGEZTQLRWHAXDCOBZFYYTIORVGQ4DGEQAAAAAAAAAAAYTGOBOGY4C4MJYHEXDCNJ2GU2DQMYSAAAAAAAAAAADCMZYFY3DQLRRHA4S4MJXHI2TIOBTCIAAAAAAAAAAAMJTHAXDMOBOGE4DSLRRHA5DKNBYGMJAAAAAAAAAAABRGM4C4NRYFYYTQOJOGE4TUNJUHAZREAAAAAAAAAAAGEZTQLRWHAXDCOBZFYZTCORVGQ4DGEQAAAAAAAAAAAYTGOBOGY4C4MJYHEXDGNB2GU2DQMYSAAAAAAAAAAADCMZYFY3DQLRRHA4S4MZWHI2TIOBTCIAAAAAAAAAAAMJTHAXDMOBOGE4DSLRTHA5DKNBYGMJAAAAAAAAAAABRGM4C4NRYFYYTQOJOGM4TUNJUHAZRCAAAAAAAAAAAGQ3C4MJQGEXDKLRRG44TUNJUHAZQC2YVAAAAAAAAAEDQAAAAAAAAAADBNRYGQYK7GIAFQXLAN3FR2BBK7UHAAOLMKFMW3BJBUV56OQEWWTVFMAW6GHV36Y4YHIAAAAAAAAABQAAAAAAAAAAAK4EPBDVXQZHYXWHJVIT4V3QAWOIRXH5YOVJHSOQBAAAAAAAAAAABKAAAAAAAAAAAMFYHA4ZPN52GQZLSKBZG6Z3SMFWW4YLNMXIYRFMERF3GPXIMLEUUNSMI52OE6TXB7B6KFFJERZK6VKS66S6QTGB2AAAAAAAAAAASAAAAAAAAAAAANDZOKQGWS7GNLJ4HTVNMATREDPGN4J7GOWKOJB24KVTJEN2GNJHBQAA
AAAAAAAAAMBJ4AFZCH7MS5YLFIWQF3UJC7MMJ34ENEY35VJQAAUAAAAAAAAAAAAAAAAAACAAAAABAAAAAAMAAAAAEAAAAAAA' myApp.decode_ipc_msg(grantedAuth,None) grantedAuthPointer = myApp.queue.get() myApp.app_registered(b'otherProgramname',grantedAuthPointer[0],None) appPointer = myApp.queue.get() with open('myNewMutableData_as_bytes','rb') as f: mutableBytes = f.read() ffiMut = safenet.safe_utils.getffiMutable(mutableBytes,myApp.ffi_app) @myApp.ffi_app.callback("void(void* , FfiResult*, MDataKey*, uint64_t)") def result_mdata_list_keys(user_data, result, key, size): #print('results') #print('error code: ') #print(result.error_code) if result.error_code != 0: print(myApp.ffi_app.string(result.description)) #print(key.val_len) #print(size) if size>0: #print(key.val_len) print(myApp.ffi_app.string(key.key)) #returnDict = ffi.from_handle(user_data) #returnDict['myMdata']=mdataInfo @myApp.ffi_app.callback("void(void* , FfiResult*, MDataValue*, uint64_t)") def result_mdata_list_values(user_data, result, value, size): #print('results') #print('error code: ') #print(result.error_code) if result.error_code != 0: print(myApp.ffi_app.string(result.description)) #print(key.val_len) #print(size) if size>0: #print(value.content_len) print(myApp.ffi_app.string(value.content)) #returnDict = ffi.from_handle(user_data) #returnDict['myMdata']=mdataInfo myApp.lib.safe_app.mdata_list_keys(appPointer,ffiMut,myApp.ffi_app.NULL,result_mdata_list_keys) myApp.lib.safe_app.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL,result_mdata_list_values) myMutable = safenet.MutableData(mutableBytes) myMutable.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL) myApp.mutableData.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL) myMutable.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL,result_mdata_list_values) myApp.mutableData.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL,myApp.ffi_app.callback) 
myApp.mutableData.mdata_list_values(appPointer,ffiMut,myApp.ffi_app.NULL,myApp.ffi_app.callback,result_mdata_list_values) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset_train = pd.read_csv('Google_Price_Train_2004-2016.csv') #we concentrate on open price #with '1:2' and '.values', we get a the array of the 'open' column training_set = dataset_train.iloc[:, 1:2].values training_set.shape #feature scaling with normalisation from sklearn.preprocessing import MinMaxScaler #'sc' is an object of the MinMaxScaler class #the scaled stock prices will be between 0 and 1 sc = MinMaxScaler(feature_range = (0, 1)) #apply the scaler on the data training_set_scaled = sc.fit_transform(training_set) #create data structure with 60 timesteps and 1 output #timesteps means: for one output in time t, it will check the values #of the 60 moments before X_train = [] y_train = [] #60 is the starting point (we need the days before to create first training value) #3106 is the index of the last day for i in range(60, 3106): #get previous 60 values #'0' specifies the column (we only have one column) X_train.append(training_set_scaled[i-60:i, 0]) y_train.append(training_set_scaled[i, 0]) #make arrays from the lists X_train, y_train = np.array(X_train), np.array(y_train) #reshape data and add more dimensions #define number of indicators #currently we have one indicator ('open' stock price) #we for example can add stock prices of another company #'X_train.shape[0]' is the number of lines #'X_train.shape[1]' is the number of columns #'1' is the number of indicators (here we only take google stock price) X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) X_train.shape #import keras libraries & packages #that helps us to set up a sequence of layers from keras.models import Sequential #'Dense' produces the output layer from keras.layers import Dense #getting the LSTM layers from keras.layers import LSTM #use dropout to avoid overfitting from keras.layers import Dropout #initialize the RNN #'regressor' is used to predict a continuous value #'regressor' is an object 
of the 'Sequential' class regressor = Sequential() #add first LSTM layer and dropout regularization #LSTM object has 3 arguments: #1) number of units (neurons; LSTM cells) #2) return sequences: has to be true, cause we build a stacked LSTM #3) input shape (relates to shape of X_train; 2 last dimensions are enough) regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1))) #add dropout regularization #Dropout rate has one argument: #--> dropout rate (number of neurons, that should be switched off #during each iteration of the training) regressor.add(Dropout(0.2)) #2nd layer #we don't need to specifiy input shape anymore regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) #3nd layer regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) #4th layer #we remove 'return_sequences = True' and keep the default value --> 'false' regressor.add(LSTM(units = 50)) regressor.add(Dropout(0.2)) #add output layer regressor.add(Dense(units = 1)) #compile RNN with right optimizer #and right loss function #use 'compile' method of the 'sequential' class #different optimizers can be found on keras documentation regressor.compile(optimizer = 'adam', loss = 'mean_squared_error') #fit RNN to training set #'batch_size' means: in every epoch, the model takes 32 observations #and updates the weights accordingly # 3106 total observations / 32 observations per batch =^ 96 regressor.fit(X_train, y_train, epochs = 100, batch_size = 32) #save the model from keras.models import load_model regressor.save('reg_google_2004-2016.h5') #get real stock price of January 2017 dataset_test = pd.read_csv('Google_Stock_Price_Test.csv') real_stock_price = dataset_test.iloc[:, 1:2].values #get predicted stock price of 2017 #in order to predict stockprice of one day in Jan 17, #we need 60 previous days #for these 60 days, we need training and test set #we will concatenate the initial dataframes #then we will scale 
the values #we have to scale the input values #cause the rnn was trained on the scaled values #for vertical concatenation, we take: axis = 0 #for horizontal concatenation, we take: axis = 1 dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0) #get inputs of 60 previous days #first financial day that we want to predict is Jan 3 #we get the index with this expression: [len(dataset_total) - len(dataset_test)] #--> length of dataset_test is 20 #we get the lower bound with this expression: len(dataset_total) - len(dataset_test) - 60 #the upper bound is the last index of the whole dataset #with 'values' we get a numpy array #'inputs' gives us all information to predict values of Jan 2017 inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values #reshape to get right numpy shape #now we have the observations in lines and in one column inputs = inputs.reshape(-1, 1) #scale the inputs (but not the test values) #here we don't take the 'fit_transform' method, because #'sc' was already prepared inputs = sc.transform(inputs) #make 3d structure for test set X_test = [] #upper bound is 80: 60 + 20 (we have 20 financial days in the test set) for i in range(60, 80): #get previous 60 values for each of the stock prices in Jan 2017 #'0' specifies the column (we only have one column) X_test.append(inputs[i-60:i, 0]) #make arrays from the lists X_test = np.array(X_test) #reshape data and add more dimensions #define number of indicators #currently we have one indicator ('open' stock price) #we for example can add stock prices of another company #'X_train.shape[0]' is the number of lines #'X_train.shape[1]' is the number of columns #'1' is the number of indicators (here we only take google stock price) X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) ``` ## Get predictions based on training set from 2004 to 2016 ``` from keras.models import load_model regressor = load_model('reg_google_2004-2016.h5') #make predictions on 
the values of X_test (Jan 2017) predicted_stock_price = regressor.predict(X_test) #inverse scaling of predictions predicted_stock_price = sc.inverse_transform(predicted_stock_price) #visualize the results plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price for Jan 2017') plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price for Jan 2017') plt.title('Google Stock Price Prediction (based on training \n with previous Stock Prices between 2004 & 2016)') plt.xlabel('Time') plt.ylabel('Stock Price') plt.legend() plt.show() import math from sklearn.metrics import mean_squared_error #divide rmse by 840 (range of stock price of Jan 2017) --> so we get a relative stock price rmse_04_16 = (math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))) / 840 rmse_04_16 #concat arrays horizontally arrays_concat = np.concatenate((real_stock_price, predicted_stock_price), axis = 1) real_val_list = arrays_concat[:,0].tolist() pred_val_list = arrays_concat[:,1].tolist() #loop through columns of array and check directions #if second value bigger than first value: give a 2 #if second value smaller than first value: give a 1 #get directions of real values direction_list_realval = [] n = 0 for x in real_val_list: if real_val_list[n] > real_val_list[n-1]: direction_list_realval.append(2) else: direction_list_realval.append(1) n = n + 1 #get directions of predicted values direction_list_predval = [] n = 0 for x in pred_val_list: if pred_val_list[n] > pred_val_list[n-1]: direction_list_predval.append(2) else: direction_list_predval.append(1) n = n + 1 #change lists to arrays real_val_array = np.array(real_val_list) direction_array_realval = np.array(direction_list_realval) pred_val_array = np.array(pred_val_list) direction_array_predval = np.array(direction_list_predval) #reshape to 2D array real_val_2d = np.reshape(real_val_array, (-1, 1)) direction_2d_realval = np.reshape(direction_array_realval, (-1, 1)) pred_val_2d = 
np.reshape(pred_val_array, (-1, 1)) direction_2d_predval = np.reshape(direction_array_predval, (-1, 1)) val_dir = np.concatenate((real_val_2d, direction_2d_realval, pred_val_2d, direction_2d_predval), axis = 1) #select direction columns from 2D array list_ind = [1, 3] val_bin = val_dir[:,list_ind] correct_list = [] for x in val_bin: if x[0] == x[1]: correct_list.append(1) else: correct_list.append(0) correct_list_array = np.reshape(correct_list, (-1, 1)) val_bin_correct = np.concatenate((val_bin, correct_list_array), axis = 1) dir_acc = sum(val_bin_correct[:,2]) / len(val_bin_correct) dir_acc ```
github_jupyter
# CX 4230, Spring 2016: [18] Discrete event simulation of a gas station Recall the introduction to queueing models and discrete event simulators from the last class: [link](https://t-square.gatech.edu/access/content/group/gtc-59b8-dc03-5a67-a5f4-88b8e4d5b69a/cx4230-sp16--17-queueing.pdf). In this notebook, you will implement it. ## Exponential random numbers Recall that in a queueing model, it is common to assume that customer interarrival times and service times are independent and identically distributed random variables. Classically, the most commonly assumed distribution is _exponential_. More specifically, an exponentially distributed random variable $X \sim \mathcal{E}(\lambda)$ has the probability density function, $$ f_X(x) = \frac{1}{\lambda} \cdot \exp\left(-\frac{x}{\lambda}\right), $$ where $\lambda$ is the mean of the distribution. Using Numpy, these are easy to generate using the function, `numpy.random.exponential()`: http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.random.exponential.html. Here is a quick demo. ``` from numpy.random import exponential X_MEAN = 10.0 X_COUNT = 5 x_values = exponential (X_MEAN, X_COUNT) print ("X ~ Exp(%g):" % X_MEAN) for (i, x_i) in enumerate (x_values): print (" X_%d = %g" % (i, x_i)) ``` As a sanity check, let's generate a large number of values and compare the sample mean to the desired (true) mean. ``` from numpy import mean # @Demo N_BIG = 1000 big_mean = mean (exponential (X_MEAN, N_BIG)) print ("\nSample mean of %d values: %g" % (N_BIG, big_mean)) ``` ## Priority queues To maintain the future event list, you need some kind of priority queue data structure. One classical choice is to use a heap, for which there is a standard implementation in Python: [link](http://www.bogotobogo.com/python/python_PriorityQueue_heapq_Data_Structure.php) Here's a quick demo.
``` from heapq import heappush, heappop, heapify # Method 1: Convert any Python list into a heap h1 = list (x_values) print ("Initial values:", h1) heapify (h1) print ("\nHeapified:", h1) print ("\nExtracting mins...") for i in range (len (h1)): print (i, ":", heappop (h1)) # Method 2: Insert values into the heap one at a time print ("Inserting...") h2 = [] for (i, x_i) in enumerate (x_values): print (i, ":", x_i) heappush (h2, x_i) print ("\nHeap:", h2) print ("\nExtracting minima...") for i in range (len (h2)): print (i, ":", heappop (h2)) ``` ## A generic discrete event simulation engine We can build a simple, generic discrete event simulation engine. This engine manages the future event list, which you'll recall is a priority queue of timestamped events. It continually removes the event with the lowest timestamp and processes it. Suppose we represent an event by a tuple, `(t, e)`, where `t` is the event's timestamp and `e` is an event handler. An event handler is simply a function. Let's suppose that this function takes two arguments, `e (t, s)`, where `t` is (again) the timestamp and `s` is the system state, encoded in an application-specific way. When `e (t, s)` executes, it may update the state `s`. **Exercise.** Complete the following function, which implements a generic discrete event simulation engine. The future events list is `events`. The initial system state is `initial_state`; the starter code below makes a copy of this state as a variable `s`, which your simulator can modify. Additionally, you should correct the last `print()` statement so that instead of a pair of `None` values it prints the timestamp and event name (name of the event handler). 
from copy import deepcopy
from heapq import heappush, heappop
from numpy.random import exponential


def simulate(events, initial_state):
    """Generic discrete event simulation engine.

    Repeatedly pops the event with the smallest timestamp off the future
    event list and invokes its handler, which may update the system state
    in place.

    Parameters
    ----------
    events : list of (float, callable)
        Future event list, a heapq-managed heap of ``(t, handler)`` tuples,
        where ``handler(t, s)`` processes the event at time ``t`` on state
        ``s``.  The list is consumed (emptied) by the run.
    initial_state : dict
        Initial system state.  A deep copy is made, so the caller's dict is
        left untouched.
    """
    s = deepcopy(initial_state)  # simulate on a private copy of the state

    print("\nFuture event list:\n%s" % str(events))
    print("\nt=0: %s" % str(s))

    while events:
        # Get the earliest event and process it.
        (t, e) = heappop(events)
        e(t, s)
        # Timestamps are continuous (sums of exponentials), so print them
        # with %g rather than truncating them to integers with %d.
        print("t=%g: event '%s' => '%s'" % (t, e.__name__, str(s)))


# ## Instantiating the simulator
# For the gas station model, we asked you to assume the interarrival times,
# pumping times, and shopping times were exponential i.i.d. random variables.
# So, let's start by defining some parameters for these distributions.
# Let's also pre-generate some number of car arrivals.

# Event parameters: means of the exponential distributions.
MEAN_INTERARRIVAL_TIME = 15.0  # minutes
MEAN_PUMPING_TIME = 5.0        # minutes
MEAN_SHOPPING_TIME = 10.0      # minutes

# Number of customers (cars)
NUM_CARS = 5

# Pre-generate some interarrival times
car_interarrival_times = exponential(MEAN_INTERARRIVAL_TIME, NUM_CARS)
print("Interrival times (in minutes) of all cars:\n", car_interarrival_times)

# Recall that the state consists of the logical simulation time (`now`) and
# three state variables: `AtPump`, `AtStore`, and `PumpFree`.
# Let's create this state.

now = 0.0  # Current (logical) simulation time

state = {'AtPump': 0,      # no. cars at pump or waiting
         'AtStore': 0,     # no. cars at store
         'PumpFree': True  # True <==> pump is available
         }

# Let's represent an _event_ as a tuple, `(t, e)`, where `t` is the timestamp
# of the event and `e` is an event handler, implemented as a Python function.
# If the future event list is stored in a global priority queue called
# `events`, the following function will insert an event into that queue.


def schedule(t, e):
    """Schedules a new event `e` at time `t`.

    Pushes the ``(t, e)`` pair onto the global future event list `events`,
    which must already exist as a heapq-managed list.
    """
    global events
    print(" ==> '%s' @ t=%g" % (e.__name__, t))
    heappush(events, (t, e))

# **Exercise.** Implement an event handler to process a car arrival event.
# Assume that event handlers take as input the timestamp `t` of the event
# and the state `s` of the system at time `t`.

def arrives(t, s):
    """Car-arrival handler.

    A new car joins the pump queue.  If the pump is idle it starts serving
    this car immediately, so a finished-pumping event is scheduled.
    Returns the (mutated) system state.
    """
    s['AtPump'] = s['AtPump'] + 1
    if not s['PumpFree']:
        # Pump is busy: the car just waits; `finishes` starts the next car.
        return s
    s['PumpFree'] = False
    schedule(t + exponential(MEAN_PUMPING_TIME), finishes)
    return s


def finishes(t, s):
    """Finished-pumping handler.

    The car at the pump moves into the store, so its eventual departure is
    scheduled.  If other cars are waiting, the pump immediately starts the
    next one; otherwise it goes idle.  Returns the system state.
    """
    s['AtPump'] = s['AtPump'] - 1
    s['AtStore'] = s['AtStore'] + 1
    schedule(t + exponential(MEAN_SHOPPING_TIME), departs)
    if s['AtPump'] > 0:
        schedule(t + exponential(MEAN_PUMPING_TIME), finishes)
    else:
        s['PumpFree'] = True
    return s


def departs(t, s):
    """Station-departure handler: one car leaves the store (and the station)."""
    s['AtStore'] = s['AtStore'] - 1
    return s


# **Exercise.** Create an initial future events list by converting the raw
# interarrival times into arrival events and inserting them into the future
# events list.

# Hint: This function may prove useful
from numpy import cumsum

events = []  # Future event list, initially empty

# Cumulative sums of the interarrival gaps give absolute arrival times.
for arrival_time in cumsum(car_interarrival_times):
    schedule(arrival_time, arrives)

# Test code
print("\nContents of `events[:]`:")
for i, (t_i, handler) in enumerate(events):
    print("[%d] t=%g: %s" % (i, t_i, handler.__name__))

# More test code: If everything worked, so should this simulation!
simulate(events, state)
github_jupyter
# Convolution Neural Network with Trained Word2Vec Embeddings for Part-Of-Speech Tagging - English ________________________________________ ## Create the Dataset ``` import nltk # Natural Language Toolkit's Brown, Treebank, and Conll2000 Corpora with universal tagset nltk.download('brown') nltk.download('treebank') nltk.download('conll2000') nltk.download('universal_tagset') from nltk.corpus import brown from nltk.corpus import treebank from nltk.corpus import conll2000 treebank_corpus = treebank.tagged_sents(tagset='universal') brown_corpus = brown.tagged_sents(tagset='universal') conll_corpus = conll2000.tagged_sents(tagset='universal') # Combine the corpora into a single dataset tagged_sentences = treebank_corpus + brown_corpus + conll_corpus # Split data into lists of sentences and tags X = [] Y = [] for sentence in tagged_sentences: X_sentence = [] Y_sentence = [] for pair in sentence: X_sentence.append(pair[0]) # the word Y_sentence.append(pair[1]) # the tag X.append(X_sentence) Y.append(Y_sentence) # Look at the data print("First Sentence:\n") print(X[0]) print("\nIt's Tags:\n") print(Y[0]) ``` #### Some information about the data: ``` num_words = len(set([word.lower() for sentence in X for word in sentence])) tags = set([word.lower() for sentence in Y for word in sentence]) num_tags = len(tags) print(num_words) print(num_tags) print(tags) ``` #### Tags: Conjunction, Punctuation, Numeral, Adverb, Verb, Noun, Pronoun, Adjective, Participle, Determiner, X(Other), Adposition ``` tagdict = { "conj": "Conjunction", ".": "Punctuation", "propn": "Proper Noun", "num": "Numeral", "adv": "Adverb", "verb": "Verb", "noun": "Noun", "pron": "Pronoun", "adj": "Adjective", "part": "Participle", "det": "Determiner", "x": "Other", "adp": "Adposition", } ``` ______________________________ ## Prepare the Data ``` import numpy as np import tensorflow as tf import tensorflow.keras as keras from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import 
pad_sequences from keras.utils.np_utils import to_categorical from gensim.models import KeyedVectors from sklearn.model_selection import train_test_split ``` #### Tokenize and pad each sentence and it's tags: ``` # encode X word_tokenizer = Tokenizer() # instantiate tokeniser word_tokenizer.fit_on_texts(X) # fit tokeniser on data # use the tokenizer to encode input sentence sequence X_encoded = word_tokenizer.texts_to_sequences(X) # encode Y tag_tokenizer = Tokenizer() tag_tokenizer.fit_on_texts(Y) # use the tokenizer to encode input tag sequence Y_encoded = tag_tokenizer.texts_to_sequences(Y) # save the tokenizers for use in our demo import io import json wtokenizer_json = word_tokenizer.to_json() ttokenizer_json = tag_tokenizer.to_json() with io.open('wtokenizer.json', 'w', encoding='utf-8') as f: f.write(json.dumps(wtokenizer_json, ensure_ascii=False)) with io.open('ttokenizer.json', 'w', encoding='utf-8') as f: f.write(json.dumps(ttokenizer_json, ensure_ascii=False)) # sequences greater than 50 in length will be truncated MAX_SEQ_LENGTH = 50 X_padded = pad_sequences(X_encoded, maxlen=MAX_SEQ_LENGTH, padding='pre', truncating='post') Y_padded = pad_sequences(Y_encoded, maxlen=MAX_SEQ_LENGTH, padding='pre', truncating='post') ``` #### Split Data into Training and Testing Data ``` X_train, X_test, Y_train, Y_test = train_test_split(X_padded, Y_padded, test_size=0.15, random_state=4) ``` #### Create an Embedding Matrix using Word2Vec: ``` import gensim.downloader glove_vectors = gensim.downloader.load('word2vec-google-news-300') from gensim.models import KeyedVectors # word2vec path = "gensim-data1/word2vec-google-news-300/word2vec-google-news-300.gz" # load word2vec using the following function present in the gensim library word2vec = KeyedVectors.load_word2vec_format(path, binary=True) # assign word vectors from word2vec model # each word in word2vec model is represented using a 300 dimensional vector EMBEDDING_SIZE = 300 VOCABULARY_SIZE = 
len(word_tokenizer.word_index) + 1 # create an empty embedding matix embedding_weights = np.zeros((VOCABULARY_SIZE, EMBEDDING_SIZE)) # create a word to index dictionary mapping word2id = word_tokenizer.word_index # copy vectors from word2vec model to the words present in corpus for word, index in word2id.items(): try: embedding_weights[index, :] = word2vec[word] except KeyError: pass ``` #### One-hot encode Y: ``` # use Keras' to_categorical function to one-hot encode Y Y_train = to_categorical(Y_train) Y_test = to_categorical(Y_test) ``` #### Data Shapes: ``` print("Training shapes:\n") print(X_train.shape) print(Y_train.shape) print("\nTesting Shapes:\n") print(X_test.shape) print(Y_test.shape) ``` ___________________________ ## Build the Model ``` from keras.models import Model from keras.models import Sequential from keras import layers from tensorflow.keras import backend as K ``` #### Function to ignore the accuracy of paddings: ##### From: https://nlpforhackers.io/lstm-pos-tagger-keras/ ``` def accuracy_masked(y_true, y_pred): y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) ignore_mask = K.cast(K.not_equal(y_true_class, 0), 'int32') matches = K.cast(K.equal(y_true_class, y_pred_class), 'int32') * ignore_mask accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1) return accuracy ``` #### Sequential Convolution Network: ``` model = Sequential() model.add(layers.Embedding(input_dim = VOCABULARY_SIZE, # vocabulary size - number of unique words in data output_dim = EMBEDDING_SIZE, # length of vector with which each word is represented input_length = MAX_SEQ_LENGTH, # length of input sequence weights = [embedding_weights], # word embedding matrix trainable = True # True - update embeddings_weight matrix )) model.add(layers.Conv1D(128, kernel_size=2, padding='same', activation='relu')) model.add(layers.MaxPooling1D(pool_size=1, strides=1, padding='same')) model.add(keras.layers.Dropout(0.25)) model.add(keras.layers.Dense(128, 
activation='relu')) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Dense(128, activation='relu')) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Dense(13, activation='softmax')) model.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer=keras.optimizers.Adam(), metrics=[keras.metrics.CategoricalAccuracy(), accuracy_masked]) model.summary() keras.utils.plot_model(model, to_file='model.png', show_shapes=True) ``` __________________________ ## Train the Model ``` history = model.fit(X_train, Y_train, epochs=10, validation_split = 0.2, batch_size=128) tf.keras.models.save_model( model, "englishModel", overwrite=True, include_optimizer=True, save_format='tf', signatures=None, options=None) ``` _____________________________ ## Results: ``` import matplotlib.pyplot as plt %matplotlib inline # Evaluate the final performance of the network score = model.evaluate(X_test,Y_test,verbose=1) print("Loss:",score[0]) print("Accuracy:",score[1]) print("Masked Accuracy: ", score[2]) plt.figure(1) # summarize history for accuracy plt.subplot(211) plt.plot(history.history['categorical_accuracy']) plt.plot(history.history['val_categorical_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') # summarize history for loss plt.subplot(212) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.tight_layout() plt.show() plt.figure(1) # summarize history for masked accuracy plt.subplot(211) plt.plot(history.history['accuracy_masked']) plt.plot(history.history['val_accuracy_masked']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') ``` _____________________________________________ ## Demo ``` from nltk.tokenize import word_tokenize def output_prediction(text): # Pre-process data like 
we did for training. text = [word_tokenize(text)] text_encoded = word_tokenizer.texts_to_sequences(text) text_padded = pad_sequences(text_encoded, maxlen=MAX_SEQ_LENGTH, padding='pre', truncating='post') # Make a prediction ynew = np.argmax(model.predict(text_padded), axis=-1) prediction = ynew[0] # Trim leading zeros prediction = np.trim_zeros(prediction) # Decode prediction decoded = tag_tokenizer.sequences_to_texts([prediction]) decoded = word_tokenize(decoded[0]) decoded = [tagdict[tag] for tag in decoded] # show the inputs and predicted outputs print(" Sentence= %s\nPredicted Tags= %s" % (text, decoded)) # Type your own sentence in place of the one below! text = "The red cat took a very long walk along the winding river." output_prediction(text) ``` __________________________________________________________________ Data Pre-Processing Inspired By: https://towardsdatascience.com/pos-tagging-using-rnn-7f08a522f849, https://www.nltk.org/nltk_data/, https://www.nltk.org/ Model Inspired By: https://medium.com/saarthi-ai/sentence-classification-using-convolutional-neural-networks-ddad72c7048c Masked Accuracy Class Inspired By: https://nlpforhackers.io/lstm-pos-tagger-keras/ Word Embeddings: https://www.tensorflow.org/tutorials/text/word2vec
github_jupyter
# Welter issue #17 ## Teff vs order ### Part 2: Make a Table for the multi temps and fill factors Michael Gully-Santiago Monday, June 28, 2016 See [Issue17](https://github.com/BrownDwarf/welter/issues/17) ``` import warnings warnings.filterwarnings("ignore") import numpy as np from astropy.io import fits import matplotlib.pyplot as plt % matplotlib inline % config InlineBackend.figure_format = 'retina' import seaborn as sns sns.set_context('notebook') import pandas as pd ``` ## Retrieve aggregated data for multi-$T_{\rm eff}$ Starfish runs for all spectral orders. ``` #! head ../data/analysis/IGRINS_mix_emcee_last200.csv IG_raw = pd.read_csv('../data/analysis/IGRINS_mix_K_Run01_H_Run02.csv') IG_raw.head() ``` It's missing the logOmega2 columns for some reason... ## Plot of $T_{eff}$ vs. spectral order ``` orders = IG_raw N_orders = len(orders) sns.set_style('ticks') sns.set_context('paper') #plt.subplot(211) fig = plt.figure(figsize=(8.5, 3.5)) ax1 = fig.add_axes([0.35, 0.7, 0.45, 0.2],xticks=[], yticks=[0.0, 0.5, 1]) #ax1.fill_between(tell.wls, tell.trans, y2=1.0, alpha=0.5) ax1.set_xlim(14000, 26000) ax1.set_ylim(-0.3, 1) ax1.set_ylabel('$\oplus$ trans.') for i in range(N_orders): x = [orders.wl_start[i], orders.wl_end[i]] y = [orders.tell_trans[i]]*2 ax1.plot(x, y, 'k-', alpha=0.5) #text_pos = 500.0 + 20.0*np.arange(N_orders) for i in range(N_orders): print_every = 2 if orders.m_val[i] > 99: print_every = 3 if (orders.number.values[i] % print_every) == 0: ax1.text(orders.wl_center[i], -0.07, '{}'.format(orders.m_val.values[i]), fontsize=6, rotation=90) ax = fig.add_axes([0.35, 0.1, 0.45, 0.6]) ax.errorbar(orders.wl_center, orders.Teff_50p, yerr=(orders.Teff_50p-orders.Teff_05p, orders.Teff_95p-orders.Teff_50p), ecolor='r', capthick=1, fmt='k.', alpha=0.7, label='Hot: $T_{\mathrm{eff}, a}$') ax.errorbar(orders.wl_center, orders.Teff2_50p, yerr=(orders.Teff2_50p-orders.Teff2_05p, orders.Teff2_95p-orders.Teff2_50p), ecolor='b', capthick=1, fmt='k.', alpha=0.7, 
label='Cool: $T_{\mathrm{eff}, b}$') ax.set_ylim(2100, 4600) ax.set_xlim(14000, 26000) ax.set_ylabel('$T_{eff}$ (K)') ax.set_xlabel('$\lambda \,(\AA $)') ax.legend(loc='lower left') plt.savefig('../document/figures/LkCa4_HK_mixTeff_by_order.pdf', bbox_inches='tight') vals = orders.Teff2_50p == orders.Teff2_50p vals.sum() len(orders) ``` ## Add multi-Teff rows to the Latex Table of $T_{\rm eff}$ vs. order. ``` latex_out = pd.read_csv('../data/analysis/latex_TeffOrder_table.csv') latex_new = pd.DataFrame() latex_out.tail() ``` ### Merge single and double results ``` finite_orders = orders[orders.Teff_50p == orders.Teff_50p] fo = finite_orders.sort_values('wl_center') latex_new['Order'] = fo.m_val fo['plus_error1'] = fo.Teff_95p-fo.Teff_50p fo['minus_error1'] = fo.Teff_50p-fo.Teff_05p fo['plus_error2'] = fo.Teff2_95p-fo.Teff2_50p fo['minus_error2'] = fo.Teff2_50p-fo.Teff2_05p fo['f_plus_error'] = fo.ff_95p-fo.ff_50p fo['f_minus_error'] = fo.ff_50p-fo.ff_05p latex_new['Teff1'] = ('$'+fo.Teff_50p.apply(lambda x:"{}".format(int(x)))+'^{+'+ fo.plus_error1.apply(lambda x:"{}".format(int(x)))+'}_{-'+ fo.minus_error1.apply(lambda x:"{}".format(int(x)))+'}$') latex_new['Teff2'] = ('$'+fo.Teff2_50p.apply(lambda x:"{}".format(int(x)))+'^{+'+ fo.plus_error2.apply(lambda x:"{}".format(int(x)))+'}_{-'+ fo.minus_error2.apply(lambda x:"{}".format(int(x)))+'}$') latex_new['ff'] = ('$'+fo.ff_50p.apply(lambda x:"{:0.2f}".format(x))+'^{+'+ fo.f_plus_error.apply(lambda x:"{:0.2f}".format(x))+'}_{-'+ fo.f_minus_error.apply(lambda x:"{:0.2f}".format(x))+'}$') fo['wavelength_range'] = (fo.wl_start.apply(lambda x:"{}".format(int(np.floor(x))))+'$-$'+ fo.wl_end.apply(lambda x:"{}".format(int(np.ceil(x))))) latex_new['wavelength_range'] = fo.wavelength_range latex_merge = pd.merge(latex_out.drop(['Teff1', 'Teff2', 'f'], axis=1), latex_new, how='outer', on='Order') latex_merge.Order = latex_merge.Order.astype(int) ``` There's some missing data. 
We only added IGRINS data, and the wavelength ranges are the same. ``` latex_merge.Instrument[latex_merge.Instrument!=latex_merge.Instrument] = 'IGRINS' bi = latex_merge.wavelength_range_x != latex_merge.wavelength_range_x latex_merge.wavelength_range_x[bi] = latex_merge.wavelength_range_y[bi] latex_merge = latex_merge.rename(columns={"wavelength_range_x":"wavelength_range"}).drop(['wavelength_range_y'], axis=1) ``` Sort by wavelength. ``` columns_in_order = latex_merge.columns igr = latex_merge.Instrument == 'IGRINS' esp = latex_merge.Instrument == 'ESPaDoNs' latex_merge = pd.concat([latex_merge[esp], pd.merge(latex_merge[igr], fo[['wl_center', 'm_val']], how='outer', left_on="Order", right_on="m_val" ).sort_values("wl_center")] ).drop(['m_val', 'wl_center'], axis=1)[columns_in_order].reset_index(drop=True) latex_merge.fillna(value='$\cdots$')[43:50] latex_merge.to_latex('../document/tables/tbl_order_results_all_raw.tex', na_rep='$\cdots$', index=False, escape=False) ``` ## The end.
github_jupyter
# ### First time series
# - create a sequence of dates using pd.date_range()
# - each date in the resulting pd.DatetimeIndex is a pd.Timestamp with various
#   attributes that you can access to obtain information about the date.

import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt

# Create the range of dates here
seven_days = pd.date_range(start='2017-1-1', periods=12, freq='D')

# Iterate over the dates and print the number and name of the weekday.
# NOTE: `Timestamp.day_name` is a *method* and must be called -- printing
# `day.day_name` without parentheses only prints the bound-method repr.
for day in seven_days:
    print(day.dayofweek, day.day_name())

# #### Create a time series of air quality data

data = pd.read_csv('./data/nyc.csv')

# Inspect data
print(data.info())

# Convert the date column to datetime64
data['date'] = pd.to_datetime(data['date'])

# Set date column as index
data.set_index('date', inplace=True)

# Inspect data
print(data.info())

# Plot data
data.plot(subplots=True)
plt.show()

# #### Compare annual stock price trends

yahoo = pd.read_csv('data/yahoo.csv')
yahoo.date = pd.to_datetime(yahoo.date)
yahoo.set_index('date', inplace=True)
yahoo.head(3)

# Create dataframe prices here
prices = pd.DataFrame()

# Select data for each year and concatenate with prices here
for year in ['2013', '2014', '2015']:
    price_per_year = yahoo.loc[year, ['price']].reset_index(drop=True)
    price_per_year.rename(columns={'price': year}, inplace=True)
    prices = pd.concat([prices, price_per_year], axis=1)

# Plot prices
prices.plot()
plt.show()

# #### Set and change time series frequency
# - We have seen how to assign a frequency to a DateTimeIndex, and then change
#   this frequency.
# - set the frequency to calendar daily and then resample to monthly frequency,
#   and visualize both series to see how the different frequencies affect the
#   data.
``` co = pd.read_csv('data/co_cities.csv') co.date = pd.to_datetime(co.date) # convert str to datetime64 co.set_index('date', inplace=True) co.head(3) # Inspect data print(co.info()) # Set the frequency to calendar daily co = co.asfreq('D') # Plot the data co.plot(subplots=True) plt.show() # Set frequency to monthly co = co.asfreq('M') # Plot the data co.plot(subplots=True) plt.show() ``` #### Shifting stock prices across time - `.shift()` allows us to shift all values in a Series or DataFrame by a number of periods to a different time along the DateTimeIndex. - Let's use this to visually compare a stock price series for Google shifted 90 business days into both past and future. ``` google = pd.read_csv('./data/google.csv', parse_dates=['Date'], index_col='Date') # print(google.head(3)) # Set data frequency to business daily google = google.asfreq('B') # Create 'lagged' and 'shifted' that contain the Close shifted by 90 business days into past and future, respectively. google['lagged'] = google.Close.shift(periods=-90) google['shifted'] = google.Close.shift(periods=90) # Plot the google price series google.plot(subplots=True) plt.show() ``` ### Calculating stock price changes - We have learnt how to calculate returns using current and shifted prices as input. 
Now we'll practice a similar calculation to calculate absolute changes from current and shifted prices, and compare the result to the function `.diff()` ``` yahoo = pd.read_csv('./data/yahoo.csv') # Created shifted_30 here yahoo['shifted_30'] = yahoo.price.shift(periods=30) # Subtract shifted_30 from price yahoo['change_30'] = yahoo.price.sub(yahoo.shifted_30) # Get the 30-day price difference yahoo['diff_30'] = yahoo.price.diff(periods=30) # Inspect the last five rows of price print(yahoo['price'].tail()) # Show the value_counts of the difference between change_30 and diff_30 print(yahoo['diff_30'].sub(yahoo['change_30']).value_counts()) ``` ### Plotting multi-period returns - Use `.pct_change()` to calculate returns for various calendar day periods, and plot the result to compare the different patterns. ``` # Create daily_return google['daily_return'] = google.Close.pct_change(periods=1).mul(100) # Create monthly_return google['monthly_return'] = google.Close.pct_change(periods=30).mul(100) # Create annual_return google['annual_return'] = google.Close.pct_change(periods=360).mul(100) # Plot the result google.plot(subplots=True) plt.show() ``` ### Compare the performance of several asset classes - We can easily compare several time series by normalizing their starting points to 100, and plot the result. - To broaden our perspective on financial markets, let's compare four key assets: stocks, bonds, gold, and oil. ``` prices = pd.read_csv('./data/asset_classes.csv', parse_dates=['DATE'], index_col='DATE') # Inspect prices here print(prices.info()) prices.head(3) # Select first prices first_prices = prices.iloc[0] # Create normalized normalized = prices.div(first_prices).mul(100) # Plot normalized normalized.plot() plt.show() ``` - Normalizing series is a common step in time series analysis. 
### Comparing stock prices with a benchmark ``` # Import stock prices and index here stocks = pd.read_csv('./data/nyse.csv', parse_dates=['date'], index_col='date') dow_jones = pd.read_csv('./data/dow_jones.csv', parse_dates=['date'], index_col='date') # Concatenate data and inspect result here data = pd.concat([stocks, dow_jones], axis=1) print(data.info()) # Normalize and plot your data here data.div(data.iloc[0]).mul(100).plot() plt.show() ``` ### Plot performance difference vs benchmark index - compare the performance of Microsoft (MSFT) and Apple (AAPL) to the S&P 500 over the last 10 years. ``` # Create tickers tickers = ['MSFT', 'AAPL'] # Import stock data here stocks = pd.read_csv('./data/msft_aapl.csv', parse_dates=['date'], index_col='date') # Import index here sp500 = pd.read_csv('./data/sp500.csv', parse_dates=['date'], index_col='date') # Concatenate stocks and index here data = pd.concat([stocks, sp500], axis=1).dropna() # Normalize data normalized = data.div(data.iloc[0]).mul(100) # Subtract the normalized index from the normalized stock prices, and plot the result normalized[tickers].sub(normalized['SP500'], axis=0).plot() plt.show() ``` ### Convert monthly to weekly data - We have learnt to use `.reindex()` to conform an existing time series to a DateTimeIndex at a different frequency. - Use this method by creating monthly data and then converting this data to weekly frequency while applying various fill logic options. 
``` # Set start and end dates start = '2016-1-1' end = '2016-2-29' # Create monthly_dates here monthly_dates = pd.date_range(start, end, freq='M') # Create and print monthly here monthly = pd.Series(data=[1,2,], index=monthly_dates) print(monthly) # Create weekly_dates here weekly_dates = pd.date_range(start, end, freq='W') # Print monthly, reindexed using weekly_dates print(monthly.reindex(weekly_dates)) print('----'+'Reindex to weekly dates with back fill'+'-----------') print(monthly.reindex(weekly_dates, method='bfill')) print('----'+'Reindex to weekly dates with forward fill'+'-----------') print(monthly.reindex(weekly_dates, method='ffill')) ``` ### Create weekly from monthly unemployment data ``` # Import data here data = pd.read_csv('./data/unrate.csv', parse_dates=['DATE'], index_col='DATE') data.head() # Show first five rows of weekly series print(data.asfreq('W').head()) # Show first five rows of weekly series with bfill option print(data.asfreq('W', method='bfill').head()) # Create weekly series with ffill option and show first five rows weekly_ffill = data.asfreq('W', method='ffill') print(weekly_ffill.head()) # Plot weekly_fill starting 2015 here weekly_ffill.loc['2015':].plot() plt.show() ``` ### Upsampling & interpolation with .resample() #### Use interpolation to create weekly employment data - We have used the civilian US unemployment rate, and converted it from monthly to weekly frequency using **simple forward or backfill methods**. - Compare the previous approach to the new **.interpolate()** method. 
``` monthly = pd.read_csv('./data/unrate.csv', parse_dates=['DATE'], index_col='DATE') monthly.head(3) # Inspect data here print(monthly.info()) # Create weekly dates weekly_dates = pd.date_range(monthly.index.min(), monthly.index.max(),freq='W') # Reindex monthly to weekly data weekly = monthly.reindex(weekly_dates) # Create ffill and interpolated columns weekly['ffill'] = weekly.UNRATE.ffill() weekly['interpolated'] = weekly.UNRATE.interpolate() # Plot weekly weekly.plot() plt.show() ``` #### Interpolating is a useful way to create smoother time series when resampling. #### Interpolate debt/GDP and compare to unemployment - Apply interpolate to quarterly debt/GDP series, and compare the result to the monthly unemployment rate. ``` # Import & inspect data here data = pd.read_csv('data/debt_unemployment.csv', parse_dates=['date'], index_col='date') print(data.info()) # Interpolate and inspect here interpolated = data.interpolate() print(interpolated.info()) # Plot interpolated data here interpolated.plot(secondary_y='Unemployment') plt.show() ``` ### Compare weekly, monthly and annual ozone trends for NYC & LA ``` # Import and inspect data here ozone = pd.read_csv('./data/ozone_nyla.csv', parse_dates=['date'], index_col='date') print(ozone.info()) # Calculate and plot the weekly average ozone trend ozone.resample('W').mean().plot() plt.show() # Calculate and plot the monthly average ozone trend ozone.resample('M').mean().plot() plt.show() # Calculate and plot the annual average ozone trend ozone.resample('A').mean().plot() plt.show() ``` ### Compare monthly average stock prices for Facebook and Google ``` # Import and inspect data here stocks = pd.read_csv('./data/goog_fb.csv', parse_dates=['date'], index_col=['date']) print(stocks.info()) # Calculate and plot the monthly averages monthly_average = stocks.resample('M').mean() monthly_average.head() monthly_average.plot(subplots=True) plt.show() ``` ### Compare quarterly GDP growth rate and stock returns - With our 
new skill to downsample and aggregate time series, we can compare higher-frequency stock price series to lower-frequency economic time series. ``` # Import and inspect gdp_growth here gdp_growth = pd.read_csv('./data/gdp_growth.csv', parse_dates=['date'], index_col='date') gdp_growth.head(3) # Import and inspect djia here djia = pd.read_csv('./data/djia.csv', parse_dates=['date'], index_col='date') djia.head(3) # Calculate djia quarterly returns here djia_quarterly = djia.resample('QS').first() djia_quarterly.head(3) djia_quarterly_return = djia_quarterly.pct_change().mul(100) djia_quarterly_return.head() # Concatenate, rename and plot djia_quarterly_return and gdp_growth here data = pd.concat([gdp_growth, djia_quarterly_return], axis=1) data.columns = ['gdp', 'djia'] data.plot() plt.show(); ``` ### Visualize monthly mean, median and standard deviation of S&P500 returns ``` # Import data here sp500 = pd.read_csv('./data/sp500.csv', parse_dates=['date'], index_col='date') sp500.head(3) # Calculate daily returns here daily_returns = sp500.squeeze().pct_change() daily_returns.head(3) # Resample and calculate statistics stats = daily_returns.resample('M').agg(['mean','median','std']) stats.head(3) # Plot stats here stats.plot() plt.show() ``` #### Rolling average air quality since 2010 for new york city ``` # Import and inspect ozone data here data = pd.read_csv('./data/ozone_nyc.csv', parse_dates=['date'], index_col='date') print(data.info()) # Calculate 90d and 360d rolling mean for the last price data['90D'] = data.Ozone.rolling(window='90D').mean() data['360D'] = data.Ozone.rolling(window='360D').mean() # Plot data data.loc['2010':].plot(title='New York City') plt.show() ``` ### Rolling 360-day median & std. deviation for nyc ozone data since 2000 - The daily data are very volatile, so using a longer term rolling average can help reveal a longer term trend. 
``` # Import and inspect ozone data here data = pd.read_csv('./data/ozone_nyc.csv', parse_dates=['date'], index_col='date').dropna() # Calculate the rolling mean and std here rolling_stats = data.Ozone.rolling(360).agg(['mean', 'std']) # Join rolling_stats with ozone data stats = data.join(rolling_stats) # Plot stats stats.plot(subplots=True); plt.show() ``` ### Rolling quantiles for daily air quality in nyc - calculate rolling quantiles to describe changes in the dispersion of a time series over time in a way that is **less sensitive to outliers** than using the mean and standard deviation. - calculate rolling quantiles - at 10%, 50% (median) and 90% - of the distribution of daily average ozone concentration in NYC using a 360-day rolling window. ``` # Resample, interpolate and inspect ozone data here data = data.resample('D').interpolate() print(data.info()) # Create the rolling window rolling = data.Ozone.rolling(window=360) # Insert the rolling quantiles to the monthly returns data['q10'] = rolling.quantile(0.1).to_frame('q10') data['q50'] = rolling.median().to_frame('median') data['q90'] = rolling.quantile(0.9).to_frame('q90') # Plot the data data.plot() plt.show() ``` #### The rolling quantiles help show the volatility of the series. ### Cumulative sum vs .diff() ``` data = pd.read_csv('./data/amazon_close.csv', parse_dates=['date'], index_col='date') data.head(2) # Calculate differences differences = data.diff().dropna() # Select start price start_price = data.first('D') print(start_price) # Calculate cumulative sum cumulative_sum = start_price.append(differences).cumsum() # Validate cumulative sum equals data print(data.equals(cumulative_sum)) ```
github_jupyter
# AIT Development notebook ## notebook of structure |#|area name|cell num|description|edit or not| |---|---|---|---|---| | 1|flags set|1|setting of launch jupyter or ait flag.|no edit| | 2|ait-sdk install|1|Use only jupyter launch.<br>find ait-sdk and install.|no edit| | 3|create requirements and pip install|3|Use only jupyter launch.<br>create requirements.txt.<br>And install by requirements.txt.|should edit(second cell, you set use modules.)| | 4|import|2|you should write use import modules.<br>but bottom lines do not edit.|should edit(first cell, you import your module.)| | 5|create manifest|1|Use only jupyter launch.<br>create ait.manifest.json.|should edit| | 6|create input|1|Use only jupyter launch.<br>create ait.input.json.|should edit| | 7|initialize|1|this cell is initialize for ait progress.|no edit| | 8|functions|N|you defined measures, resources, downloads in ait.manifest.json. <br>Define any functions to add these.|should edit| | 9|main|1|Read the data set or model and calls the function defined in `functions-area`.|should edit| |10|entrypoint|1|Call the main function.|no edit| |11|license attribute set|1|Use only notebook launch.<br>Setting attribute for license.|should edit| |12|prepare deploy|1|Use only notebook launch.<br>Convert to python programs and create dag.py.|no edit| ## notebook template revision history ### 1.0.1 2020/10/21 * add revision history * separate `create requirements and pip install` editable and noeditable * separate `import` editable and noeditable ### 1.0.0 2020/10/12 * new creation ``` ######################################### # area:flags set # do not edit ######################################### # Determine whether to start AIT or jupyter by startup argument import sys is_ait_launch = (len(sys.argv) == 2) ######################################### # area:ait-sdk install # do not edit ######################################### if not is_ait_launch: # get ait-sdk file name from pathlib import Path from glob import glob 
import re def numericalSort(value): numbers = re.compile(r'(\d+)') parts = numbers.split(value) parts[1::2] = map(int, parts[1::2]) return parts latest_sdk_file_path=sorted(glob('../lib/*.whl'), key=numericalSort)[-1] ait_sdk_name = Path(latest_sdk_file_path).name # copy to develop dir import shutil current_dir = %pwd shutil.copyfile(f'../lib/{ait_sdk_name}', f'{current_dir}/{ait_sdk_name}') # install ait-sdk !pip install --upgrade pip !pip install --force-reinstall ./$ait_sdk_name ######################################### # area:create requirements and pip install # do not edit ######################################### if not is_ait_launch: from ait_sdk.common.files.ait_requirements_generator import AITRequirementsGenerator requirements_generator = AITRequirementsGenerator() ######################################### # area:create requirements and pip install # should edit ######################################### if not is_ait_launch: requirements_generator._package_list = [] requirements_generator.add_package('matplotlib', '3.3.0') requirements_generator.add_package('numpy', '1.18.5') requirements_generator.add_package('pandas', '1.1.0') requirements_generator.add_package('scikit-learn', '0.23.2') requirements_generator.add_package('scipy', '1.4.1') requirements_generator.add_package('seaborn', '0.10.1') requirements_generator.add_package('sklearn', '0.0') requirements_generator.add_package('tensorflow', '2.3.0') requirements_generator.add_package('tensorflow-estimator', '2.3.0') ######################################### # area:create requirements and pip install # do not edit ######################################### if not is_ait_launch: requirements_generator.add_package(f'./{ait_sdk_name}') requirements_path = requirements_generator.create_requirements(current_dir) !pip install -r $requirements_path ######################################### # area:import # should edit ######################################### # import if you need modules cell from typing 
import List import tensorflow as tf import tensorflow.keras.backend as K from tensorflow.keras.utils import to_categorical from sklearn.metrics import confusion_matrix, roc_curve, auc, roc_auc_score import numpy as np import pandas as pd import seaborn as sn import matplotlib.pyplot as plt from pathlib import Path from scipy import interp from itertools import cycle from os import makedirs, path from ait_sdk.utils import get_summary_text from ait_sdk.utils.mnist import MNIST from ait_sdk.utils.acc_calculator import ACCCalculator # must use modules import shutil # do not remove from ait_sdk.common.files.ait_input import AITInput # do not remove from ait_sdk.common.files.ait_output import AITOutput # do not remove from ait_sdk.common.files.ait_manifest import AITManifest # do not remove from ait_sdk.develop.ait_path_helper import AITPathHelper # do not remove from ait_sdk.utils.logging import get_logger, log, get_log_path # do not remove from ait_sdk.develop.annotation import measures, resources, downloads, ait_main # do not remove # must use modules ######################################### # area:import # do not edit ######################################### # must use modules import shutil # do not remove from ait_sdk.common.files.ait_input import AITInput # do not remove from ait_sdk.common.files.ait_output import AITOutput # do not remove from ait_sdk.common.files.ait_manifest import AITManifest # do not remove from ait_sdk.develop.ait_path_helper import AITPathHelper # do not remove from ait_sdk.utils.logging import get_logger, log, get_log_path # do not remove from ait_sdk.develop.annotation import measures, resources, downloads, ait_main # do not remove # must use modules ######################################### # area:create manifest # should edit ######################################### if not is_ait_launch: from ait_sdk.common.files.ait_manifest_generator import AITManifestGenerator manifest_genenerator = AITManifestGenerator(current_dir) 
manifest_genenerator.set_ait_name('eval_mnist_acc_tf2.3') manifest_genenerator.set_ait_description('Only Sequential API Model\n\n<QualityMeasurement>\nAccuracy=TP+TNTP+FP+FN+TN\nPrecision=TPTP+FP\nRecall=TPTP+FN\nF−measure=2Recall∗PrecisionRecall+Precision\nAUC\n\n<Resources>\nROC曲線\n混同行列\nNG予測画像') manifest_genenerator.set_ait_author('AIST') manifest_genenerator.set_ait_email('') manifest_genenerator.set_ait_version('0.1') manifest_genenerator.set_ait_quality('https://airc.aist.go.jp/aiqm/quality/internal/機械学習モデルの正確性') manifest_genenerator.set_ait_reference('') manifest_genenerator.add_ait_inventories(name='trained_model', type_='model', description='Tensorflow 2.3で学習したモデル', format_=['h5'], schema='https://support.hdfgroup.org/HDF5/doc/') manifest_genenerator.add_ait_inventories(name='test_set_images', type_='dataset', description='テスト画像セット(MNISTフォーマット)', format_=['gz'], schema='http://yann.lecun.com/exdb/mnist/') manifest_genenerator.add_ait_inventories(name='test_set_labels', type_='dataset', description='テスト画像ラベル(MNISTフォーマット)', format_=['gz'], schema='http://yann.lecun.com/exdb/mnist/') manifest_genenerator.add_ait_parameters(name='class_count', type_='int', description='multiple classification class number', default_val='10') manifest_genenerator.add_ait_parameters(name='image_px_size', type_='int', description='prediction image pixel size', default_val='28') manifest_genenerator.add_ait_parameters(name='auc_average', type_='string', description='{‘micro’, ‘macro’, ‘samples’, ‘weighted’}\r\nref:\r\nhttps://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html', default_val='macro') manifest_genenerator.add_ait_parameters(name='auc_multi_class', type_='string', description='{‘raise’, ‘ovr’, ‘ovo’}\nref:\nhttps://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html', default_val='raise') manifest_genenerator.add_ait_measures(name='Accuracy', type_='float', description='(TP+TN) / (TP+FP+FN+TN)', structure='single') 
manifest_genenerator.add_ait_measures(name='Precision', type_='float', description='TP / (TP+FP)', structure='single') manifest_genenerator.add_ait_measures(name='Recall', type_='float', description='TP / (TP+FN)', structure='single') manifest_genenerator.add_ait_measures(name='F−measure', type_='float', description='(2Recall∗Precision) / (Recall+Precision)', structure='single') manifest_genenerator.add_ait_measures(name='AUC', type_='float', description='Area under the ROC curve', structure='single') manifest_genenerator.add_ait_measures(name='AccuracyByClass', type_='float', description='Accuracy for each class.', structure='sequence') manifest_genenerator.add_ait_measures(name='PrecisionByClass', type_='float', description='Precision for each class.', structure='sequence') manifest_genenerator.add_ait_measures(name='RecallByClass', type_='float', description='Recall for each class.', structure='sequence') manifest_genenerator.add_ait_measures(name='F−measureByClass', type_='float', description='F−measure for each class.', structure='sequence') manifest_genenerator.add_ait_resources(name='ConfusionMatrixHeatmap', path='/usr/local/qai/resources/1/confusion_matrix.png', type_='picture', description='混同行列(ヒートマップ)') manifest_genenerator.add_ait_resources(name='ROC-curve', path='/usr/local/qai/resources/2/roc_curve.png', type_='picture', description='ROC曲線') manifest_genenerator.add_ait_resources(name='NGPredictImages', path='/usr/local/qai/resources/3/ng_predict_actual_class_{}.png', type_='picture', description='推論NGとなった画像の一覧を、正解ラベルの枚数分だけ出力する') manifest_genenerator.add_ait_downloads(name='Log', path='/usr/local/qai/downloads/1/ait.log', description='AIT実行ログ') manifest_genenerator.add_ait_downloads(name='ConfusionMatrixCSV', path='/usr/local/qai/downloads/2/confusion_matrix.csv', description='混同行列') manifest_genenerator.add_ait_downloads(name='PredictionResult', path='/usr/local/qai/downloads/3/prediction.csv', description='ID,正解ラベル,推論結果確率(ラベル毎)') manifest_path = 
manifest_genenerator.write() ######################################### # area:create input # should edit ######################################### if not is_ait_launch: from ait_sdk.common.files.ait_input_generator import AITInputGenerator input_generator = AITInputGenerator(manifest_path) input_generator.add_ait_inventories(name='trained_model', value='trained_model/model_1.h5') input_generator.add_ait_inventories(name='test_set_images', value='test_set_images/t10k-images-idx3-ubyte.gz') input_generator.add_ait_inventories(name='test_set_labels', value='test_set_labels/t10k-labels-idx1-ubyte.gz') input_generator.set_ait_params(name='class_count', value='10') input_generator.set_ait_params(name='image_px_size', value='28') input_generator.set_ait_params(name='auc_average', value='macro') input_generator.set_ait_params(name='auc_multi_class', value='raise') input_generator.write() ######################################### # area:initialize # do not edit ######################################### logger = get_logger() ait_manifest = AITManifest() ait_input = AITInput(ait_manifest) ait_output = AITOutput(ait_manifest) if is_ait_launch: # launch from AIT current_dir = path.dirname(path.abspath(__file__)) path_helper = AITPathHelper(argv=sys.argv, ait_input=ait_input, ait_manifest=ait_manifest, entry_point_dir=current_dir) else: # launch from jupyter notebook # ait.input.json make in input_dir input_dir = '/usr/local/qai/mnt/ip/job_args/1/1' current_dir = %pwd path_helper = AITPathHelper(argv=['', input_dir], ait_input=ait_input, ait_manifest=ait_manifest, entry_point_dir=current_dir) ait_input.read_json(path_helper.get_input_file_path()) ait_manifest.read_json(path_helper.get_manifest_file_path()) ### do not edit cell ######################################### # area:functions # should edit ######################################### # 1/9 @log(logger) @measures(ait_output, 'Accuracy', 'Precision', 'Recall', 'F−measure') def calc_acc_all(y_test, y_pred) -> (float, float, 
float, float): calc = ACCCalculator() one_hot_y = to_categorical(y_test) return calc.average_accuracy(one_hot_y, y_pred).numpy() , \ calc.macro_precision(one_hot_y, y_pred).numpy() , \ calc.macro_recall(one_hot_y, y_pred).numpy() , \ calc.macro_f_measure(one_hot_y, y_pred).numpy() ######################################### # area:functions # should edit ######################################### # 2/9 @log(logger) @measures(ait_output, 'AccuracyByClass', 'PrecisionByClass', 'RecallByClass', 'F−measureByClass', is_many=True) def calc_acc_by_class( y_test, y_pred) -> (List[float], List[float], List[float], List[float]): calc = ACCCalculator() one_hot_y = to_categorical(y_test) return calc.all_class_accuracy(one_hot_y, y_pred) , \ [v.numpy() for v in calc.all_class_precision(one_hot_y, y_pred)] , \ [v.numpy() for v in calc.all_class_recall(one_hot_y, y_pred)] , \ [v.numpy() for v in calc.all_class_f_measure(one_hot_y, y_pred)] ######################################### # area:functions # should edit ######################################### # 3/9 @log(logger) @downloads(ait_output, path_helper, 'ConfusionMatrixCSV') def save_confusion_matrix_csv(y_test, y_pred, file_path: str=None) -> None: makedirs(str(Path(file_path).parent), exist_ok=True) cmx_data = confusion_matrix(y_test, K.argmax(y_pred)) logger.info(cmx_data) np.savetxt(file_path, cmx_data, fmt='%d', delimiter=',') ######################################### # area:functions # should edit ######################################### # 4/9 @log(logger) @resources(ait_output, path_helper, 'ConfusionMatrixHeatmap') def save_confusion_matrix_heatmap(y_test, y_pred, file_path: str=None) -> None: makedirs(str(Path(file_path).parent), exist_ok=True) y_pred = K.argmax(y_pred) labels = sorted(list(set(y_test))) cmx_data = confusion_matrix(y_test, y_pred, labels=labels) df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels) fig = plt.figure(dpi=100, figsize=(8,6)) sn.heatmap(df_cmx, annot=True, fmt='g' ,square = True) ax 
= fig.add_subplot(1, 1, 1) ax.set_xlabel('Predicted class') ax.set_ylabel('Actual class') ax.set_title('Plot of Confusion Matrix') # save as png plt.savefig(file_path) ######################################### # area:functions # should edit ######################################### # 5/9 @log(logger) @resources(ait_output, path_helper, 'ROC-curve') def save_roc_curve(y_test, y_pred, n_classes: int, file_path: str=None) -> None: makedirs(str(Path(file_path).parent), exist_ok=True) y_true = to_categorical(y_test) y_score = y_pred fpr = dict() tpr = dict() roc_auc = dict() # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) # Compute ROC curve and ROC area for each class for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_score[:, i], drop_intermediate=False) roc_auc[i] = auc(fpr[i], tpr[i]) # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure(dpi=100, figsize=(8,6)) plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) colors = cycle(['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']) #colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) for i, color in zip(range(n_classes), colors): plt.plot(fpr[i], 
tpr[i], color=color, lw=2, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=2) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.savefig(file_path) ######################################### # area:functions # should edit ######################################### # 6/9 @log(logger) @measures(ait_output, 'AUC') def calc_auc(y_test, y_pred, multi_class: str, average: str) -> float: y_true = to_categorical(y_test) y_score = y_pred return roc_auc_score(y_true, y_score, multi_class=multi_class, average=average) ######################################### # area:functions # should edit ######################################### # 7/9 @log(logger) @resources(ait_output, path_helper, 'NGPredictImages') def save_ng_predicts(X_test, y_test, y_pred, n_classes: int, file_path: str=None) -> List[str]: makedirs(str(Path(file_path).parent), exist_ok=True) out_files = [] y_true = y_test y_pred = K.argmax(y_pred) # unmach_classes={label:[{predict: index}] } unmach_classes={} for actual_class_no in range(n_classes): unmach_classes[actual_class_no] = {} for predict_class_no in range(n_classes): unmach_classes[actual_class_no][predict_class_no] = [] for i in range(y_pred.shape[-1]): if y_true[i] != y_pred[i].numpy(): unmach_classes[y_true[i]][y_pred[i].numpy()].append(i) # visualization def draw_digit(data, row, col, n, index) -> None: ax = plt.subplot(row, col, n) ax.axis("off") ax.set_title(str(index)) plt.imshow(data, cmap = "gray") def draw_text(text, row, col, n) -> None: ax = plt.subplot(row, col, n) ax.axis("off") # build a rectangle in axes coords left, width = .25, .5 bottom, height = .25, .5 right = left + width top = bottom + height ax.text(0.5*(left+right), 0.5*(bottom+top), text, horizontalalignment='center', verticalalignment='center', 
fontsize=12, color='black', transform=ax.transAxes) show_size = 10 + 1 for actual_class_no in range(n_classes): fig = plt.figure(figsize=(n_classes,show_size)) fig.suptitle('actual class.{}'.format(actual_class_no), fontsize=20) unmachies = unmach_classes[actual_class_no] for predict_class_no in range(n_classes): indexes = unmach_classes[actual_class_no][predict_class_no] offset = predict_class_no * show_size + 1 draw_text('predict\nclass\n{}'.format(predict_class_no), n_classes, show_size, offset) image_count = 0 for index in indexes: offset += 1 draw_digit(X_test[index], n_classes, show_size, offset, index) # 11枚分以上は読み捨て image_count += 1 if image_count >= 10: break out_file = file_path.format(actual_class_no) out_files.append(out_file) plt.savefig(out_file) return out_files ######################################### # area:functions # should edit ######################################### # 8/9 @log(logger) @downloads(ait_output, path_helper, 'PredictionResult') def save_prediction_result(y_test, y_pred, file_path: str=None) -> None: makedirs(str(Path(file_path).parent), exist_ok=True) # Label + PredictProva out_data = np.hstack([y_test.reshape(y_test.shape[0], 1), y_pred]) index = [str(i) for i in range(1, y_test.shape[0]+1)] columns = ['Label']+[f'PredictionProva_Class_{i}' for i in range(1,y_pred.shape[1]+1)] df = pd.DataFrame(data=out_data, index=index, columns=columns, dtype='float') df.to_csv(file_path) ######################################### # area:functions # should edit ######################################### # 9/9 @log(logger) @downloads(ait_output, path_helper, 'Log') def move_log(file_path: str=None) -> None: makedirs(str(Path(file_path).parent), exist_ok=True) shutil.move(get_log_path(), file_path) ######################################### # area:main # should edit ######################################### @log(logger) @ait_main(ait_output, path_helper) def main() -> None: image_px_size = ait_input.get_method_param_value('image_px_size') # 
インベントリのMNISTラベル・画像を読み込み mnist = MNIST() X_test = mnist.load_image(ait_input.get_inventory_path('test_set_images'), image_px_size) y_test = mnist.load_label(ait_input.get_inventory_path('test_set_labels')) # 前処理として、画像を最大値255で割って0.0 - 1.0に規格化 X_test_normalize = X_test / 255 # モデル読み込み model = tf.keras.models.load_model(ait_input.get_inventory_path('trained_model')) logger.info(get_summary_text(model)) # 推論 y_pred = model.predict(X_test_normalize) # 全体精度評価値(measure) calc_acc_all(y_test=y_test, y_pred=y_pred) # クラス別精度評価値(measure) calc_acc_by_class(y_test=y_test, y_pred=y_pred) # 混同行列(CSV) save_confusion_matrix_csv(y_test=y_test, y_pred=y_pred) # 混同行列(PNG) save_confusion_matrix_heatmap(y_test=y_test, y_pred=y_pred) # ROC曲線(PNG) save_roc_curve(y_test=y_test, y_pred=y_pred, n_classes=ait_input.get_method_param_value('class_count')) # AUC(measure) calc_auc(y_test=y_test, y_pred=y_pred, multi_class=ait_input.get_method_param_value('auc_multi_class'), average=ait_input.get_method_param_value('auc_average')) # NG画像(PNG) save_ng_predicts(X_test=X_test, y_test=y_test, y_pred=y_pred, n_classes= int(ait_input.get_method_param_value('class_count'))) # PredictionResult(CSV) save_prediction_result(y_test=y_test, y_pred=y_pred) # log(Text) move_log() ######################################### # area:entory point # do not edit ######################################### if __name__ == '__main__': main() ######################################### # area:license attribute set # should edit ######################################### ait_owner='AIST' ait_creation_year='2020' ######################################### # area:prepare deproy # do not edit ######################################### if not is_ait_launch: from ait_sdk.deploy import prepare_deploy from ait_sdk.license.license_generator import LicenseGenerator current_dir = %pwd prepare_deploy(ait_manifest, ait_sdk_name, current_dir, requirements_path, is_remote_deploy=True) # output License.txt license_generator = LicenseGenerator() 
license_generator.write('../top_dir/LICENSE.txt', ait_creation_year, ait_owner) ```
github_jupyter
``` from google.colab import drive drive.mount('/content/drive') ``` #**Part 1 - Data gathering and feature engineering** **Libraries** ``` import numpy as np #Linear_Algebra import matplotlib.pyplot as plt import pandas as pd #Data_Processing import pandas_datareader as pdr from scipy import stats %matplotlib inline from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" pip install -q yfinance --upgrade #Import Yahoo Finance import yfinance as yf yf.pdr_override() #CISCO data SELECTED_STOCK = 'CSCO' start = '2010-12-17' end = '2018-12-17' #Download CISCO (CSCO) stock price data for the selected date range stock_data = pdr.get_data_yahoo(SELECTED_STOCK, start, end) stock_data.head(10) ``` **Feature Engineering** ``` #Getting the Open price stock_data_open = stock_data.Open.values reshaped_stock_data_open = np.reshape(stock_data_open, (-1, 1)) reshaped_stock_data_open #validity check np.mean(reshaped_stock_data_open)==np.mean(stock_data_open) ``` ###**Analysis** ``` #Finding log returns by changing the close-close price change stock_close = stock_data["Close"] stock_percent_change = np.log(stock_close / stock_close.shift(1)) *100 stock_percent_change.head() #Check for normality in the log returns plt.hist(stock_percent_change[1:], density= True) #Using Scipy to get more info like skewness, Kurtosis stats.describe(stock_percent_change[1:]) ``` >--For investors, the high kurtosis of the return distribution(16.64) implies that the investor will experience occasional extreme returns (either positive or negative), more extreme than the usual + or - three standard deviations from the mean that is predicted by the normal distribution of returns. This phenomenon is known as kurtosis risk. 
>--The kurtosis isn't close to 0, so a normal distribution for the returns is not assumed ``` print('CISCO : ', stats.kurtosistest(stock_percent_change[1:])) ``` >CISCO : KurtosistestResult(statistic=21.6296870467075, pvalue=9.442157604570577e-104) >--Since the Z value is 21.63 which is higher than 1.96, it leads us to conclude that we're not seeing Kurtosis from a normal distribution >--since the pvalue is <0.05, we reject the null hypothesis, that is, the kurtosis is not from a normal distribution --There is a very low probability (<0.05) that we're seeing these results from a random chance. ####**Stocks Fundamental Data** ``` !pip install yfinance CISCO = yf.Ticker("CSCO") ``` #####**Key Ratios** ``` # get price to book pb = CISCO.info['priceToBook'] print('Price to Book Ratio is: %.2f' % pb) ``` #####**Options Data** ``` pip install nsepy from datetime import date from nsepy import get_history stock_opt = get_history(symbol="CSCO", start=date(2019, 1, 15), end=date(2019, 2, 1), option_type="CE", strike_price=2000, expiry_date=date(2019, 2, 28)) stock_opt.head() ``` **Analyze performance** ####Visualization and Analysis ``` # Install pyfolio if not already installed !pip install pyfolio import pyfolio as pf # Define the ticker list tickers_list = ['CSCO'] # Import pandas and create a placeholder for the data import pandas as pd data = pd.DataFrame(columns=tickers_list) # Feth the data import yfinance as yf for ticker in tickers_list: data[ticker] = yf.download(ticker, period='5y',)['Adj Close'] # Compute the returns of individula stocks and then compute the daily mean returns. # The mean return is the daily portfolio returns with the above four stocks. 
def sharpe(returns, rf, days=252):
    """Return the annualized Sharpe ratio of a periodic return series.

    Parameters
    ----------
    returns : pd.Series
        Periodic (e.g. daily) returns.
    rf : float
        Risk-free rate, expressed per period of ``returns``.
    days : int, optional
        Number of periods per year used to annualize volatility
        (default 252 trading days).

    Returns
    -------
    float
        Excess mean return divided by annualized volatility.
    """
    # Annualize the standard deviation of the periodic returns.
    annualized_vol = returns.std() * np.sqrt(days)
    return (returns.mean() - rf) / annualized_vol
x_train = [] y_train = [] #1 output to predict for i in range(n_period,len(scaled_data)): x_train.append(scaled_data[i-n_period:i,0]) y_train.append(scaled_data[i,0]) x_train, y_train = np.array(x_train), np.array(y_train) #reshaping x_train_ = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) return x_train_, x_train, y_train x_train_, x_train, y_train = timestamp(60, scaled_data) ``` #**Part 2 - Model Identification** ##**Decision Tree (Regression)** ``` from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor dt = DecisionTreeRegressor() decision_tree_regr = BaggingRegressor(dt, n_estimators=10, random_state=0) decision_tree_regr.fit(x_train, y_train) ``` ##**Recurrent Neural Network (RNN)** ``` import warnings warnings.simplefilter(action='ignore', category=FutureWarning) #Importing the keras libraries and packages from tensorflow.python.keras.layers import Dense, LSTM, Dropout from tensorflow.python.keras import Sequential regressor = Sequential() #Adding the first LSTM Layer and some Dropout regularisation regressor.add(LSTM(units=50, return_sequences=True, input_shape = (x_train_.shape[1], 1))) regressor.add(Dropout(rate = 0.2)) x_train.shape[1] #Adding the second LSTM Layer and some Dropout regularisation regressor.add(LSTM(units=50, return_sequences=True)) regressor.add(Dropout(rate = 0.2)) #Adding the third LSTM Layer and some Dropout regularisation regressor.add(LSTM(units=50, return_sequences=True)) regressor.add(Dropout(rate = 0.2)) #Adding the fourth LSTM Layer and some Dropout regularisation regressor.add(LSTM(units=50)) regressor.add(Dropout(rate = 0.2)) #Adding the output layer regressor.add(Dense(units=1)) #compiling the RNN regressor.compile(optimizer='adam', loss='mean_squared_error') #fitting the RNN to the training set regressor.fit(x_train_, y_train, epochs=50, batch_size = 32) ``` **Save the model** ``` regressor = regressor.save("regressor.h5") ``` **Load the model** ``` from 
tensorflow.python.keras.models import load_model regressor = load_model("regressor.h5") ``` ##**Making the predictions and visualising the results** ``` # Getting the real/test stock price of 2019 test_stock_data = pdr.get_data_yahoo(SELECTED_STOCK, start = '2018-12-18', end = '2019-12-17') real_stock_price = test_stock_data.iloc[:, 1:2].values dataset_total = pd.concat((stock_data['Open'], test_stock_data['Open']), axis = 0) inputs = dataset_total[len(dataset_total) - len(test_stock_data) - 60:].values inputs = inputs.reshape(-1,1) inputs = sc.transform(inputs) X_test = [] for i in range(60, 310): #80 because we're predicting 20 records X_test.append(inputs[i-60:i, 0]) X_test = np.array(X_test) X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) predicted_stock_price = regressor.predict(X_test) predicted_stock_price = sc.inverse_transform(predicted_stock_price) #retranform the output because our input data was scaled between 0 and 1. # Visualising the results plt.plot(real_stock_price, color = 'red', label = 'Real CISCO Stock Price') plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted CISCO Stock Price') plt.title('CISCO Stock Price Prediction') plt.xlabel('Time') plt.ylabel('CISCO Stock Price') plt.legend() plt.show() ```
github_jupyter
<div style="width:1000 px"> <div style="float:right; width:98 px; height:98px;"> <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;"> </div> <h1>Plotting and Jupyter Notebooks</h1> <h3>Unidata Python Workshop</h3> <div style="clear:both"></div> </div> <hr style="height:2px;"> One of the most common tasks we face as scientists is making plots. Visually assessing data is one of the best ways to explore it - who can look at a wall of tabular data and tell anything? In this lesson we'll show how to make some basic plots in notebooks and introduce interactive widgets. Matplotlib has many more features than we could possibly talk about - this is just a taste of making a basic plot. Be sure to browse the [matplotlib gallery](https://matplotlib.org/gallery.html) for ideas, inspiration, and a sampler of what's possible. ``` # Import matplotlib as use the inline magic so plots show up in the notebook import matplotlib.pyplot as plt %matplotlib inline # Make some "data" x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] y = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] ``` ## Basic Line and Scatter Plots ``` # Make a simple line plot plt.plot(x, y) # Play with the line style plt.plot(x, y, color='tab:red', linestyle='--') # Make a scatter plot plt.plot(x, y, color='tab:orange', linestyle='None', marker='o') ``` ## Adding Interactivity to Plots ``` # Let's make some more complicated "data" using a sine wave with some # noise superimposed. This gives us lots of things to manipulate - the # amplitude, frequency, noise amplitude, and DC offset. 
def plot_pseudotemperature(f, A, An, offset):
    """Plot a noisy sine wave as fake "temperature" data.

    Intended as the callback for ``ipywidgets.interact``, which turns
    each parameter into a slider.

    Parameters
    ----------
    f : number
        Frequency multiplier applied to the sine argument.
    A : number
        Amplitude of the sine wave.
    An : number
        Amplitude of the uniform noise added on top.
    offset : number
        Constant (DC) offset added to every sample.
    """
    # 100 evenly spaced samples over one full period [0, 2*pi].
    x = np.linspace(0, 2*np.pi, 100)
    # Sine signal + uniform noise in [0, An) + DC offset.
    y = A * np.sin(f * x) + np.random.random(100) * An + offset
    fig = plt.figure()
    plt.plot(x, y)
    plt.xlabel('X Values')
    plt.ylabel('Y Values')
    plt.title('My Temperature Data')
    plt.show()
github_jupyter
分析並預測信用卡用戶違約資料 ==================== 2005年台灣信用卡違約用戶資料分析。由 Kaggle 所提供之資料,[Default Payments of Credit Card Clients in Taiwan from 2005](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset),其中有30,000筆台灣信用卡用戶的用戶資料以及違約情形,資料格式包含了性別、教育程度、信用卡額度、年齡等基本資料,以及2005年4月到9月付款狀況、信用卡帳務、還款金額等資料。 本研究除了針對用戶的分佈資料進行分析之外,還利用信用卡用戶的付款狀況、信用卡帳務、還款金額等資料建立模型分析並預測用戶是否會違約。 模型是利用機器學習的 KMean Cluster 結合 Linear Probability Model 統計模型完成,先利用 KMean Cluster 將用戶依照信用卡付款狀況進行分類,之後再將分類完的分群進行利用 Linear Probability Model 計算該群體的違約機率,利用此方法可以將原本整體違約機率約22%的全體用戶,分成違約機率 10% 至 78% 共 19 群的群體。最後再依分群完的機率經過設定的機率閥值換算後,準確率可達 81.15% 。與 Kaggle 上其他模型的 82% 相近。 ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm import random from sklearn.cluster import KMeans from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn import linear_model pd.options.display.max_columns = 35 pd.options.display.max_rows = 100 rawdata = pd.read_csv('UCI_Credit_Card.csv', index_col='ID') print('Shape of Raw Data:', rawdata.shape) print('一共30,000筆資料,含是否違約合計24個變數') rawdata.rename(columns={ 'PAY_0':'PAY_Sept', 'PAY_2':'PAY_Aug', 'PAY_3':'PAY_Jul', 'PAY_4':'PAY_Jun', 'PAY_5':'PAY_May', 'PAY_6':'PAY_Apr', 'BILL_AMT1':'BILL_AMT_Sept','BILL_AMT2':'BILL_AMT_Aug', 'BILL_AMT3':'BILL_AMT_Jul', 'BILL_AMT4':'BILL_AMT_Jun', 'BILL_AMT5':'BILL_AMT_May', 'BILL_AMT6':'BILL_AMT_Apr', 'PAY_AMT1':'PAY_AMT_Sept','PAY_AMT2':'PAY_AMT_Aug', 'PAY_AMT3':'PAY_AMT_Jul', 'PAY_AMT4':'PAY_AMT_Jun', 'PAY_AMT5':'PAY_AMT_May', 'PAY_AMT6':'PAY_AMT_Apr', 'default.payment.next.month':'is_default' }, inplace=True) repay_status = rawdata[['PAY_Sept','PAY_Aug','PAY_Jul','PAY_Jun','PAY_May','PAY_Apr']] bill_statement = rawdata[['BILL_AMT_Sept','BILL_AMT_Aug','BILL_AMT_Jul', 'BILL_AMT_Jun','BILL_AMT_May','BILL_AMT_Apr',]] prev_payment = 
rawdata[['PAY_AMT_Sept','PAY_AMT_Aug','PAY_AMT_Jul', 'PAY_AMT_Jun','PAY_AMT_May','PAY_AMT_Apr']] rawdata.head(5) ``` ### is_default 檢查違約人數及比例 ``` is_default = rawdata['is_default'] show_default = pd.concat([is_default.value_counts(), is_default.value_counts(normalize=True)], axis=1) show_default.columns = ['人數', '百分比'] show_default.index = ['正常', '違約'] print('整體違約人數及比例:') show_default ``` # 1. Demographic Factors 人口因素 ### Limit Balance 信用額度 ``` # Limit Balance 信用額度 limit_bal = rawdata.LIMIT_BAL print('信用額度之敘述統計資料:') print(limit_bal.describe().round()) %matplotlib inline fig, ax = plt.subplots(figsize=(15,7)) # 總體信用額度分佈情形 n, bins, patches = plt.hist(limit_bal, bins=200) ax.text(50000,3365,'($50,000, 3365)') ax.text(200000,1528,'($200,000, 1528)') ax.text(360000,881,'($360,000, 881)') ax.text(500000,722,'($500,000, 722)') ax.text(930000,50,'($1,000,000, max)') ax.text(167484,2500,'Average: $167484') # 違約用戶之信用額度分佈情形 n, bins, patches = plt.hist(limit_bal[is_default==1], bins=200) # 用紅線畫出平均數 plt.axvline(x=167484.0, color='red') plt.xlabel plt.legend(['Average Limit Balance of All Clients', 'All Clients', 'Default Clients']) plt.title('Histogram of Limit Balance', fontsize=20) plt.ylabel('Clients') plt.xlabel('NT Dollars') plt.show() ``` 可以看到一個滿有趣的現象,在 1. (\$50,000, 3365) 2. (\$200,000, 1528) 3. (\$360,000, 881) 4. (\$500,000, 722) 這幾個點的時候比例特別的多,應該是有什麼原因,也許是有些門檻之類的,之後可以好好探討,也許把這幾個點的資料拉出來看,可能倒帳的機率有比較低?因為不會倒帳所以核的特別多? 
### Gender 性別資料 ``` gender_map = {1:'Male', 2:'Female'} gender = rawdata.SEX.map(gender_map) default_rate_by_gender = gender[is_default==1].value_counts() / \ gender.value_counts() gender_stats = pd.concat([gender.value_counts(), gender.value_counts(normalize=True), gender[is_default==1].value_counts(), default_rate_by_gender], axis=1) gender_stats.columns = ['人數', '人數比例', '違約人數', '違約率'] print('性別資料:') gender_stats ``` 可以看到整體資料中男女性別比例大約是,女性60%、男性40%。而男女的違約比例上,男性違約率24.16%高於女性的20.78%。 ``` # Gender 繪圖 fig, ax = plt.subplots(figsize=(10,5)) ax.text(0,18300,'18,112') ax.text(0,3000,'3,763 (20.8%)') ax.text(1,12300,'11,888') ax.text(1,2200,'2,873 (24.2%)') plt.bar(gender.value_counts().index, gender.value_counts()) plt.bar(gender[is_default==1].value_counts().index, gender[is_default==1].value_counts()) plt.legend(['All Clients', 'Default Clients']) plt.title('Default Clients by Gender', fontsize=20) plt.ylabel('Clients') plt.show() ``` ### Education 學歷 ``` edu_map = {1:'Graduate school', 2:'University', 3:'High school', 4:'Others', 5:'Unknown', 6:'Unknown'} education = rawdata.EDUCATION.map(edu_map) default_rate_by_education = education[is_default==1].value_counts() / \ education.value_counts() education_stats = pd.concat([education.value_counts(), education.value_counts(normalize=True), education[is_default==1].value_counts(), default_rate_by_education], axis=1) education_stats.columns = ['人數', '人數比例', '違約人數', '違約率'] print('學歷資料:') education_stats fig, ax = plt.subplots(figsize=(10,5)) ax.text(0,14030,'14,030') ax.text(0,3330,'3,330 (23.7%)') ax.text(1,10585,'10,585') ax.text(1,2036,'2,036 (19.2%)') ax.text(2,4917,'4,917') ax.text(2,1237,'1,237 (25.1%)') plt.bar(education.value_counts().index, education.value_counts()) plt.bar(education[is_default==1].value_counts().index, education[is_default==1].value_counts()) plt.legend(['All Clients', 'Default Clients']) plt.title('Default Clients by Education', fontsize=20) plt.ylabel('Clients') plt.show() ``` ### Marriage 婚姻狀況 
``` marri_map = {1:'Married', 2:'Single', 3:'Others'} marriage = rawdata.MARRIAGE.map(marri_map) default_rate_by_marriage = marriage[is_default==1].value_counts() / \ marriage.value_counts() marriage_stats = pd.concat([marriage.value_counts(), marriage.value_counts(normalize=True), marriage[is_default==1].value_counts(), default_rate_by_marriage], axis=1) marriage_stats.columns = ['人數', '人數比例', '違約人數', '違約率'] print('婚姻狀況資料:') marriage_stats fig, ax = plt.subplots(figsize=(10,5)) ax.text(0,15964,'15,964') ax.text(0,3341,'3,341(20.9%)') ax.text(1,13659,'13,659') ax.text(1,3206,'3,206 (23.5%)') plt.bar(marriage.value_counts().index, marriage.value_counts()) plt.bar(marriage[is_default==1].value_counts().index, marriage[is_default==1].value_counts()) plt.legend(['All Clients', 'Default Clients']) plt.title('Default Clients by Marriage', fontsize=20) plt.ylabel('Clients') plt.show() ``` ### Age 年齡 ``` age = rawdata.AGE age_bins = [20, 25, 30, 35, 40, 45, 50, 55, 60, np.Inf] age_map = { pd.Interval(20.0, 25.0, closed='right'):'20-25', pd.Interval(25.0, 30.0, closed='right'):'26-30', pd.Interval(30.0, 35.0, closed='right'):'31-35', pd.Interval(35.0, 40.0, closed='right'):'36-40', pd.Interval(40.0, 45.0, closed='right'):'41-45', pd.Interval(45.0, 50.0, closed='right'):'46-50', pd.Interval(50.0, 55.0, closed='right'):'51-55', pd.Interval(55.0, 60.0, closed='right'):'56-60', pd.Interval(60.0, np.Inf, closed='right'):'60-'} age = age.map(age_map) age.value_counts() default_rate_by_age = age[is_default==1].value_counts() / \ age.value_counts() age_stats = pd.concat([age.value_counts(), age.value_counts(normalize=True), age[is_default==1].value_counts(), default_rate_by_age], axis=1) age_stats.columns = ['人數', '人數比例', '違約人數', '違約率'] print('年齡資料:') age_stats.sort_index() # Age age = rawdata.AGE fig, ax = plt.subplots(figsize=(15,7)) ax.text(35,1400,'Average age: 35') n, bins, patches = plt.hist(age, bins=200) n, bins, patches = plt.hist(age[is_default==1], bins=200) # 用紅線畫出平均數 
plt.axvline(x=35, color='red') plt.legend(['Average Age', 'All Clients', 'Default Clients']) plt.title('Default Clients by Marriage', fontsize=20) plt.ylabel('Clients') plt.show() ``` # 2. Bill Statement 信用卡帳務 ## 信用卡帳務資料異常 ### 信用卡帳務資料異常,全部都為 0 或是為 負值 信用卡帳務應該要為正或是零以上的值,出現負值所代表的意義與原因還需要再探討,可能是因為帳務記錄出錯或是有其他的涵意;而全部為零很可能是該用戶並未使用該信用卡,在沒有資料的情形下很難去預測該用戶是否違約。 ``` # 信用卡帳務資料異常,全部都為 0 或是為 負值 abnormal_bill_record = bill_statement.loc[(bill_statement<=0).all(axis=1)] print('檢視信用卡帳務資料異常:') abnormal_bill_record.head(10) abnormal_default = is_default[abnormal_bill_record.index] abnormal_default_show = pd.concat([abnormal_default.value_counts(), abnormal_default.value_counts(normalize=True)], axis=1) abnormal_default_show.columns = ['人數', '百分比'] abnormal_default_show.index = ['正常', '違約'] print('帳務資料異常之違約人數及違約率:') abnormal_default_show ``` 信用卡帳務資料異常之違約率為35.9%,比先前計算的整體用戶違約率22.12%來得高,但是取樣不同無法確認是統計上的誤差導致,還是確實帳務資料異常之違約率會比較高,需要進行統計檢定才能得知。 ### 檢查信用卡帳務資料異常用戶的違約率是否與全體用戶有顯著地不同 這裡使用 Linear Probability Model 來檢查是否顯著,將帳務資料異常用戶標記為1,正常的標記為0的dummy variable,對是否違約進行迴歸,如果該dummy variable有顯著的係數的話,即表示帳務資料異常用戶的違約率與全體用戶不同。 ``` # 建立信用卡帳務資料異常用戶的 dummy variable rawdata['is_bill_abnormal'] = 0 rawdata.loc[abnormal_bill_record.index, 'is_bill_abnormal'] = 1 # 利用 OLS 檢驗如果帳務異常(帳務記錄,bill statement, 皆為 0 或負數時),違約比例是 # 否有所不同。這邊將有帳務記錄異常的以 dummy variable 的方式記為 1,否則記為 0 。 model = sm.OLS(rawdata.is_default, sm.add_constant(rawdata.is_bill_abnormal)) result = model.fit() result.summary2() ``` 帳務異常(is_bill_abnormal)的係數為0.1426,且十分顯著,表示有帳務異常的違約比例會較正常的高出14.26%,也就是帳務異常的比例為 21.66% + 14.26% = 35.92%,並且有統計上的支持。 # 3. 
Repay Status 還款狀態 ## 結合 KMean Cluster 機器學習法與 Linear Probability Model 統計模型計算不同分群違約率 - 經過一些資料探勘後發現,還款狀態的分群結果對於客戶的違約率有很強的解釋力,因此這邊利用KMean Cluster方法,依客戶四月到九月的還款狀態將客戶分群,分群之後利用Linear Probability Model即可計算出各客群的違約率,並且檢視分群結果是否顯著。 - 之後利用Linear Probability Model的調整後R平方來選擇何KMean Cluster需要分成幾群 ## 利用機器學習的 KMean Cluster 方法將客戶依還款狀態分群 ``` # 將客戶依還款狀態分為 10 群(n_clusters=10) n_clusters = 10 kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status) cluster_label = kmean_model.predict(repay_status) # KMean Cluster Label Data cluster_label = pd.Series(cluster_label) cluster_label.index = is_default.index # 觀察 KMean Cluster 分群人數 cluster_counts = cluster_label.value_counts().sort_index() cluster_counts.index = ['cluster_'+str(i) for i in cluster_counts.index] cluster_counts = pd.DataFrame(cluster_counts) cluster_counts.columns = ['人數'] print('KMean Cluster 分群人數:') cluster_counts ``` ## 將分群後的結果以 Linear Probability Model 計算各分群違約率 ``` # 將 KMean Cluster Label 為 dummy variable,用於計算每群違約率 cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster') cluster_dummy = cluster_dummy.join(is_default) # Linear Probability Model model = sm.OLS(cluster_dummy.is_default, sm.add_constant(cluster_dummy.iloc[:,:-2])) result = model.fit() result.summary2() ``` 可以看到 cluster_0 到 cluster_8 的係數相較於 const(cluster_9) 都有顯著的不同,也就是每群的違約率都有所不同,將各個係數加上 const 之後即是各個分群的違約率。 ex. 
def loop_n_cluster(n_clusters):
    """Cluster clients by repayment status and fit an LPM on cluster dummies.

    Uses the module-level ``repay_status`` and ``is_default`` data.

    Parameters
    ----------
    n_clusters : int
        Number of KMeans clusters to form from the repayment-status columns.

    Returns
    -------
    tuple(float, int)
        Adjusted R-squared of the linear probability model and the number
        of coefficients significant at the 5% level.
    """
    kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status)
    # KMean cluster label per client, aligned to the default-flag index.
    cluster_label = pd.Series(kmean_model.predict(repay_status),
                              index=is_default.index)
    # One dummy per cluster; the last cluster serves as the reference
    # category (dropped below together with the is_default column).
    cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster')
    cluster_dummy = cluster_dummy.join(is_default)
    # Linear Probability Model: regress the default flag on cluster dummies.
    model = sm.OLS(cluster_dummy.is_default,
                   sm.add_constant(cluster_dummy.iloc[:, :-2]))
    result = model.fit()
    # Count significant coefficients directly.  The original
    # ``(result.pvalues > 0.05).value_counts()[False]`` raised KeyError
    # whenever no coefficient was significant; ``.sum()`` returns 0 instead.
    n_significant = int((result.pvalues <= 0.05).sum())
    return result.rsquared_adj, n_significant
plt.xlabel('Number of Clusters') plt.subplot(212) plt.plot(cluster_n_choose['不顯著係數數量']) plt.axvline(x=19, color='red') plt.title('Number of Insignificant Coefficients') plt.xlabel('Number of Clusters') plt.show() ``` 可以看到分群數量在 19 群之後,調整後R平方增加的速度變緩,而不顯著的係數數量開始迅速上升,因此這裡選擇分 19 群作為 KMean Cluster 的超參數。 ## n_cluster = 19 ``` # 將客戶依還款狀態分為 10 群(n_clusters=10) n_clusters = 19 kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status) cluster_label = kmean_model.predict(repay_status) # KMean Cluster Label Data cluster_label = pd.Series(cluster_label) cluster_label.index = is_default.index # 觀察 KMean Cluster 分群人數 cluster_counts = cluster_label.value_counts().sort_index() cluster_counts.index = ['cluster_'+str(i) for i in cluster_counts.index] cluster_counts = pd.DataFrame(cluster_counts) cluster_counts.columns = ['人數'] # 將 KMean Cluster Label 為 dummy variable,用於計算每群違約率 cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster') cluster_dummy = cluster_dummy.join(is_default) # Linear Probability Model model = sm.OLS(cluster_dummy.is_default, sm.add_constant(cluster_dummy.iloc[:,:-2])) result = model.fit() result.summary2() cluster_ols_params = result.params cluster_default_rate = (cluster_ols_params[1:]+cluster_ols_params[0]).append( pd.Series(cluster_ols_params[0], index=['cluster_'+str(len(cluster_ols_params)-1)])) cluster_default_rate = pd.DataFrame(cluster_default_rate) cluster_default_rate.columns = ['違約率'] print('n_cluster=19之違約率與該群人數:') cluster_default_rate.join(cluster_counts) ``` ## 設定違約率閥值(Critical Probability)計算模型準確率 假設一給定的違約機率閥值,如果該用戶分群後的違約機率超過該閥值,則設定該用戶會違約,根據此規則計算整體的的準確率。 ``` default_rate_map = cluster_default_rate default_rate_map.index = list(range(n_clusters)) cluster_simul = cluster_label.map(default_rate_map.iloc[:,0]) cluster_simul = pd.DataFrame(cluster_simul).join(is_default) cluster_simul.rename(columns={0:'model_prob'}, inplace=True) crit_prob = np.arange(0.1,1.0,0.01) for crit in crit_prob: cluster_simul[str(round(crit,2))] = \ 
cluster_simul['model_prob'].apply(lambda default_prob: 1 if default_prob>crit else 0) model_accuracy = [accuracy_score(cluster_simul.is_default, cluster_simul[c]) for c in cluster_simul.columns[2:]] model_accuracy = pd.Series(model_accuracy) model_accuracy.index = crit_prob fig, ax = plt.subplots(figsize=(10,5)) plt.plot(model_accuracy) ax.text(0.45, 0.7, 'Critical Probability = 45% ~ 56% \n Max Accuracy = 81.15%') plt.axvline(x=0.45, color='red') plt.title('Model Accuracy of Critical Probability') plt.xlabel('Critical Probability') plt.ylabel('Accuracy') plt.show() ``` 在分群19群的情形下,設定違約機率閥值在45%至56%時,此模型的準確率最高,達到81.15%
github_jupyter
# Ex 1-1 by Chainer - https://docs.chainer.org/en/stable/examples/mnist.html - https://github.com/kose/chainer-linear-regression/blob/master/net.py - https://multithreaded.stitchfix.com/blog/2015/12/09/intro-to-chainer/ ## I. 케라스처럼 제공하는 trainer 툴을 이용하는 방법 내장된 trainer를 이용할 때는 모델 클래스의 forward(self, x, t)가 오류값을 생성하는 함수로 정의되어야 한다. 자체적으로 training code를 작성하는 경우에는 forward()는 주로 모델을 출력값을 생성하는 함수로 사용될 수 있다. 그리고 클래스를 통한 모델 구성시, 케라스 방식과 달리 변수들이 나오기 전에 with self.init_scope()을 열어주고 시작해야 한다. 그렇지 않으면 변수들이 학습되지 않는다. ### 단순한 모델 구성 - https://stackoverflow.com/questions/56111935/regression-with-chainer chainer.links.Classifier()를 이용하면 loss와 accuracy 정의를 포함하는 model로 만들 수 있다. 이 모델을 만들어야 하는 이유는 이렇게 만들어나야 trainer를 사용할 수 있기 때문이다. 인공지능을 모르는 사람들에게는 편리한 방법이 될 수 있다. ``` import chainer import numpy as np x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1) y = x * 2 + 1 predictor = chainer.links.Linear(1,1) model = chainer.links.Classifier(predictor, lossfun=chainer.functions.mean_squared_error, accfun=chainer.functions.mean_squared_error) #model = chainer.links.Linear(1,1) Optimizer = chainer.optimizers.SGD() Optimizer.setup(model) train = list(zip(x[:2,:1], y[:2,:1])) Train_iter = chainer.iterators.SerialIterator(train, 2) Updater = chainer.training.updaters.StandardUpdater(Train_iter, Optimizer) Trainer = chainer.training.Trainer(Updater, (1000, 'epoch')) Trainer.run() predictor(x) ``` ### 클래스를 통한 모델 구성 ``` import chainer import numpy as np x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1) y = x * 2 + 1 class Model(chainer.Chain): def __init__(self): super().__init__() with self.init_scope(): self.layer = chainer.links.Linear(1,1) def predict(self, x): return self.layer(x) def forward(self, x, t): return chainer.functions.mean_squared_error(self.predict(x), t) model = Model() #model = chainer.links.Linear(1,1) Optimizer = chainer.optimizers.SGD() Optimizer.setup(model) train = list(zip(x[:2,:1], y[:2,:1])) Train_iter = chainer.iterators.SerialIterator(train, 2) Updater 
= chainer.training.updaters.StandardUpdater(Train_iter, Optimizer) Trainer = chainer.training.Trainer(Updater, (1000, 'epoch')) Trainer.run() model.predict(x) ``` ## II. 내장된 trainer를 사용하지 않고 학습하기 ### 단순한 모델 구성 Chainer는 모델을 만들 때, 케라스나 파이토치하고 다른 형태로 만든다. 예제 1-1은 단일 노드로 구성된 단일 계층 네트웍을 다루고 있기 때문에 Linear(1,1)로 뉴럴넷을 모델링했다. 복수 계층을 가지는 경우는 다른 형태로 모델링을 구성해야 한다. ``` import chainer import numpy as np x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1) y = x * 2 + 1 model = chainer.links.Linear(1,1) optimizer = chainer.optimizers.SGD() optimizer.setup(model) for _ in range(1000): output = model(x) loss = chainer.functions.mean_squared_error(y, output) model.zerograds() loss.backward() optimizer.update() model(x) ``` ### 클래스를 통한 모델 구성 - https://docs.chainer.org/en/stable/examples/train_loop.html 직접 학습하는 경우도 마찬가지로 클래스를 통한 모델 구성시 케라스 방식과 달리 변수들이 나오기 전에 with self.init_scope()을 열어주고 시작해야 한다. 그렇지 않으면 가중치과 바이어스가 학습되지 않는다. ``` import chainer import numpy as np x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1) y = x * 2 + 1 class Model(chainer.Chain): def __init__(self): super().__init__() with self.init_scope(): self.layer = chainer.links.Linear(1,1) def forward(self, x): return self.layer(x) model = Model() #model = chainer.links.Linear(1,1) optimizer = chainer.optimizers.SGD() optimizer.setup(model) for _ in range(1000): output = model(x) loss = chainer.functions.mean_squared_error(y, output) model.zerograds() loss.backward() optimizer.update() model(x) ```
github_jupyter
def get_connection(schema="build_1_3_0"):
    """Open a connection to the proj001_lfb PostgreSQL database.

    Parameters
    ----------
    schema : str or None, optional
        Schema to set as the connection's ``search_path``.  Pass a falsy
        value (e.g. ``None``) to leave the default search path untouched.

    Returns
    -------
    psycopg2 connection
        An open connection; the caller is responsible for closing it.

    Raises
    ------
    ValueError
        If ``schema`` is not a simple identifier (letters, digits,
        underscores).
    """
    conn = psycopg2.connect("dbname='proj001_lfb' user='postgres' "
                            "host='10.152.234.53'")
    if schema:
        # search_path takes an identifier, which cannot be passed as a
        # bound query parameter, so validate it before interpolating to
        # prevent SQL injection through the schema argument.
        if not schema.replace('_', '').isalnum():
            raise ValueError("invalid schema name: {!r}".format(schema))
        cur = conn.cursor()
        try:
            cur.execute("SET search_path TO {}".format(schema))
        finally:
            # Release the cursor even if execute fails.
            cur.close()
    return conn
Our code is simplified and more efficient ``` qry = ("select dateofcall_cln::date, incidentgroup, count(*) " "from proj001_lfb_0_0_1.l2_incidents " "group by dateofcall_cln::date, incidentgroup " "order by dateofcall_cln::date asc " ";" ) data = pd.read_sql(qry, conn) data.head() ``` However we still have to do type conversions after reading into pandas dataframes ``` data.dtypes data['dateofcall_cln'] = pd.to_datetime(data['dateofcall_cln']) data = data.sort_values('dateofcall_cln') data.head() # sns.tsplot(time="dateofcall_cln", value="count", condition="incidentgroup",data=data) sns.set(style='white', font_scale=1.7) fig, ax= plt.subplots(figsize=(15,10)) incident_types = data.incidentgroup.unique() for incident in incident_types: subdata = data[data.incidentgroup==incident] ax.plot( subdata['dateofcall_cln'], subdata['count'], label=incident) plt.legend() sns.despine() plt.xticks(rotation=45) plt.show() ``` # Which were the worst days? ``` data.sort_values('count', ascending=False).head(5) ```
github_jupyter
# Character Sequence to Sequence In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models. This notebook was updated to work with TensorFlow 1.1 and builds on the work of Dave Currie. Check out Dave's post [Text Summarization with Amazon Reviews](https://medium.com/towards-data-science/text-summarization-with-amazon-reviews-41801c2210b). <img src="images/sequence-to-sequence.jpg"/> ## Dataset The dataset lives in the /data/ folder. At the moment, it is made up of the following files: * **letters_source.txt**: The list of input letter sequences. Each sequence is its own line. * **letters_target.txt**: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number. ``` import numpy as np import time import helper source_path = 'data/letters_source.txt' target_path = 'data/letters_target.txt' source_sentences = helper.load_data(source_path) target_sentences = helper.load_data(target_path) ``` Let's start by examining the current state of the dataset. `source_sentences` contains the entire input sequence file as text delimited by newline symbols. ``` source_sentences[:50].split('\n') ``` `source_sentences` contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from `source_sentences`. `source_sentences` contains a sorted characters of the line. 
``` target_sentences[:50].split('\n') ``` ## Preprocess To do anything useful with it, we'll need to turn the each string into a list of characters: <img src="images/source_and_target_arrays.png"/> Then convert the characters to their int values as declared in our vocabulary: 
```
def extract_character_vocab(data):
    """Build int->char and char->int lookup tables for the corpus *data*.

    The four special tokens are listed first so they always receive
    ids 0-3; the remaining ids cover every distinct character seen in
    *data*.  Note: the ordering of the non-special ids is not
    deterministic because it comes from iterating a set.
    """
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']

    # Every distinct character across all lines of the corpus.
    set_words = set([character for line in data.split('\n') for character in line])
    int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
    # Reverse mapping: character -> id.
    vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}

    return int_to_vocab, vocab_to_int

# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)

# Convert characters to ids (characters missing from the vocab map to <UNK>)
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in line] for line in source_sentences.split('\n')]
# Target sequences additionally get an <EOS> id appended to each line.
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>']) for letter in line] + [target_letter_to_int['<EOS>']] for line in target_sentences.split('\n')]

print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
```
This is the final shape we need them to be in. We can now proceed to building the model.
## Model #### Check the Version of TensorFlow This will check to make sure you have the correct version of TensorFlow ``` from distutils.version import LooseVersion import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) ``` ### Hyperparameters ``` # Number of Epochs epochs = 60 # Batch Size batch_size = 128 # RNN Size rnn_size = 50 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 15 decoding_embedding_size = 15 # Learning Rate learning_rate = 0.001 ``` ### Input ``` def get_model_inputs(): input_data = tf.placeholder(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.int32, [None, None], name='targets') lr = tf.placeholder(tf.float32, name='learning_rate') target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length') max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len') source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length') return input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length ``` ### Sequence to Sequence Model We can now start defining the functions that will build the seq2seq model. We are building it from the bottom up with the following components: 2.1 Encoder - Embedding - Encoder cell 2.2 Decoder 1- Process decoder inputs 2- Set up the decoder - Embedding - Decoder cell - Dense output layer - Training decoder - Inference decoder 2.3 Seq2seq model connecting the encoder and decoder 2.4 Build the training graph hooking up the model with the optimizer ### 2.1 Encoder The first bit of the model we'll build is the encoder. Here, we'll embed the input data, construct our encoder, then pass the embedded data to the encoder. 
- Embed the input data using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence) <img src="images/embed_sequence.png" /> - Pass the embedded input into a stack of RNNs. Save the RNN state and ignore the output. <img src="images/encoder.png" /> 
```
def encoding_layer(input_data, rnn_size, num_layers, source_sequence_length,
                   source_vocab_size, encoding_embedding_size):
    """Embed the source ids and run them through a stacked LSTM encoder.

    Returns the encoder outputs and the final encoder state; only the
    state is consumed downstream (it seeds the decoder).
    """

    # Encoder embedding: [batch, time] ids -> [batch, time, embed] vectors.
    enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size)

    # RNN cell
    def make_cell(rnn_size):
        # Fixed seed so weight init is reproducible across runs.
        enc_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                           initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return enc_cell

    enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])

    # sequence_length lets dynamic_rnn skip padded steps per example.
    enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)

    return enc_output, enc_state
```
## 2.2 Decoder The decoder is probably the most involved part of this model. The following steps are needed to create it: 1- Process decoder inputs 2- Set up the decoder components - Embedding - Decoder cell - Dense output layer - Training decoder - Inference decoder ### Process Decoder Input In the training process, the target sequences will be used in two different places: 1. Using them to calculate the loss 2. Feeding them to the decoder during training to make the model more robust. Now we need to address the second point. Let's assume our targets look like this in their letter/word form (we're doing this for readibility. At this point in the code, these sequences would be in int form): <img src="images/targets_1.png"/> We need to do a simple transformation on the tensor before feeding it to the decoder: 1- We will feed an item of the sequence to the decoder at each time step. Think about the last timestep -- where the decoder outputs the final word in its output.
The input to that step is the item before last from the target sequence. The decoder has no use for the last item in the target sequence in this scenario. So we'll need to remove the last item. We do that using tensorflow's tf.strided_slice() method. We hand it the tensor, and the index of where to start and where to end the cutting. <img src="images/strided_slice_1.png"/> 2- The first item in each sequence we feed to the decoder has to be GO symbol. So We'll add that to the beginning. <img src="images/targets_add_go.png"/> Now the tensor is ready to be fed to the decoder. It looks like this (if we convert from ints to letters/symbols): <img src="images/targets_after_processing_1.png"/> 
```
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch'''
    # Drop the final time step of every sequence: take rows [0, batch_size)
    # and columns [0, -1) of the [batch, time] id tensor.
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # Prepend a column of <GO> ids so decoding starts from the go token.
    dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)

    return dec_input
```
### Set up the decoder components - Embedding - Decoder cell - Dense output layer - Training decoder - Inference decoder #### 1- Embedding Now that we have prepared the inputs to the training decoder, we need to embed them so they can be ready to be passed to the decoder. We'll create an embedding matrix like the following then have tf.nn.embedding_lookup convert our input to its embedded equivalent: <img src="images/embeddings.png" /> #### 2- Decoder Cell Then we declare our decoder cell. Just like the encoder, we'll use an tf.contrib.rnn.LSTMCell here as well. We need to declare a decoder for the training process, and a decoder for the inference/prediction process. These two decoders will share their parameters (so that all the weights and biases that are set during the training phase can be used when we deploy the model).
First, we'll need to define the type of cell we'll be using for our decoder RNNs. We opted for LSTM. #### 3- Dense output layer Before we move to declaring our decoders, we'll need to create the output layer, which will be a tensorflow.python.layers.core.Dense layer that translates the outputs of the decoder to logits that tell us which element of the decoder vocabulary the decoder is choosing to output at each time step. #### 4- Training decoder Essentially, we'll be creating two decoders which share their parameters. One for training and one for inference. The two are similar in that both created using tf.contrib.seq2seq.**BasicDecoder** and tf.contrib.seq2seq.**dynamic_decode**. They differ, however, in that we feed the the target sequences as inputs to the training decoder at each time step to make it more robust. We can think of the training decoder as looking like this (except that it works with sequences in batches): <img src="images/sequence-to-sequence-training-decoder.png"/> The training decoder **does not** feed the output of each time step to the next. Rather, the inputs to the decoder time steps are the target sequence from the training dataset (the orange letters). #### 5- Inference decoder The inference decoder is the one we'll use when we deploy our model to the wild. <img src="images/sequence-to-sequence-inference-decoder.png"/> We'll hand our encoder hidden state to both the training and inference decoders and have it process its output. TensorFlow handles most of the logic for us. We just have to use the appropriate methods from tf.contrib.seq2seq and supply them with the appropriate inputs. ``` def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size, target_sequence_length, max_target_sequence_length, enc_state, dec_input): # 1. 
Decoder Embedding target_vocab_size = len(target_letter_to_int) dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size])) dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) # 2. Construct the decoder cell def make_cell(rnn_size): dec_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)) return dec_cell dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)]) # 3. Dense layer to translate the decoder's output at each time # step into a choice from the target vocabulary output_layer = Dense(target_vocab_size, kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1)) # 4. Set up a training decoder and an inference decoder # Training Decoder with tf.variable_scope("decode"): # Helper for the training process. Used by BasicDecoder to read inputs. training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input, sequence_length=target_sequence_length, time_major=False) # Basic decoder training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, enc_state, output_layer) # Perform dynamic decoding using the decoder training_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) # 5. Inference Decoder # Reuses the same parameters trained by the training process with tf.variable_scope("decode", reuse=True): start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens') # Helper for the inference process. 
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens, target_letter_to_int['<EOS>']) # Basic decoder inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, inference_helper, enc_state, output_layer) # Perform dynamic decoding using the decoder inference_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) return training_decoder_output, inference_decoder_output ``` ## 2.3 Seq2seq model Let's now go a step above, and hook up the encoder and decoder using the methods we just declared ``` def seq2seq_model(input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers): # Pass the input data through the encoder. We'll ignore the encoder output, but use the state _, enc_state = encoding_layer(input_data, rnn_size, num_layers, source_sequence_length, source_vocab_size, encoding_embedding_size) # Prepare the target sequences we'll feed to the decoder in training mode dec_input = process_decoder_input(targets, target_letter_to_int, batch_size) # Pass encoder state and decoder inputs to the decoders training_decoder_output, inference_decoder_output = decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size, target_sequence_length, max_target_sequence_length, enc_state, dec_input) return training_decoder_output, inference_decoder_output ``` Model outputs *training_decoder_output* and *inference_decoder_output* both contain a 'rnn_output' logits tensor that looks like this: <img src="images/logits.png"/> The logits we get from the training tensor we'll pass to tf.contrib.seq2seq.**sequence_loss()** to calculate the loss and ultimately the gradient. 
``` # Build the graph train_graph = tf.Graph() # Set the graph to default to ensure that it is ready for training with train_graph.as_default(): # Load the model inputs input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs() # Create the training and inference logits training_decoder_output, inference_decoder_output = seq2seq_model(input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, len(source_letter_to_int), len(target_letter_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers) # Create tensors for the training logits and inference logits training_logits = tf.identity(training_decoder_output.rnn_output, 'logits') inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions') # Create the weights for sequence_loss masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) ``` ## Get Batches There's little processing involved when we retreive the batches. 
This is a simple example assuming batch_size = 2 Source sequences (it's actually in int form, we're showing the characters for clarity): <img src="images/source_batch.png" /> Target sequences (also in int, but showing letters for clarity): <img src="images/target_batch.png" /> 
```
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
    # The longest sentence in this batch determines the padded width.
    max_sentence = max([len(sentence) for sentence in sentence_batch])
    return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]


def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Batch targets, sources, and the lengths of their sentences together"""
    # Integer division: trailing examples that don't fill a whole batch are dropped.
    for batch_i in range(0, len(sources)//batch_size):
        start_i = batch_i * batch_size
        sources_batch = sources[start_i:start_i + batch_size]
        targets_batch = targets[start_i:start_i + batch_size]
        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))

        # Need the lengths for the _lengths parameters
        # (after padding, every length within a batch is identical).
        pad_targets_lengths = []
        for target in pad_targets_batch:
            pad_targets_lengths.append(len(target))

        pad_source_lengths = []
        for source in pad_sources_batch:
            pad_source_lengths.append(len(source))

        yield pad_targets_batch, pad_sources_batch, pad_targets_lengths, pad_source_lengths
```
## Train We're now ready to train our model. If you run into OOM (out of memory) issues during training, try to decrease the batch_size.
``` # Split data to training and validation sets train_source = source_letter_ids[batch_size:] train_target = target_letter_ids[batch_size:] valid_source = source_letter_ids[:batch_size] valid_target = target_letter_ids[:batch_size] (valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size, source_letter_to_int['<PAD>'], target_letter_to_int['<PAD>'])) display_step = 20 # Check training loss after every 20 batches checkpoint = "best_model.ckpt" with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(1, epochs+1): for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate( get_batches(train_target, train_source, batch_size, source_letter_to_int['<PAD>'], target_letter_to_int['<PAD>'])): # Training step _, loss = sess.run( [train_op, cost], {input_data: sources_batch, targets: targets_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths}) # Debug message updating us on the status of the training if batch_i % display_step == 0 and batch_i > 0: # Calculate validation cost validation_loss = sess.run( [cost], {input_data: valid_sources_batch, targets: valid_targets_batch, lr: learning_rate, target_sequence_length: valid_targets_lengths, source_sequence_length: valid_sources_lengths}) print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}' .format(epoch_i, epochs, batch_i, len(train_source) // batch_size, loss, validation_loss[0])) # Save Model saver = tf.train.Saver() saver.save(sess, checkpoint) print('Model Trained and Saved') ``` ## Prediction ``` def source_to_seq(text): '''Prepare the text for the model''' sequence_length = 7 return [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text]+ [source_letter_to_int['<PAD>']]*(sequence_length-len(text)) input_sentence = 'hello' text = 
source_to_seq(input_sentence) checkpoint = "./best_model.ckpt" loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(checkpoint + '.meta') loader.restore(sess, checkpoint) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') #Multiply by batch_size to match the model's input parameters answer_logits = sess.run(logits, {input_data: [text]*batch_size, target_sequence_length: [len(text)]*batch_size, source_sequence_length: [len(text)]*batch_size})[0] pad = source_letter_to_int["<PAD>"] print('Original Text:', input_sentence) print('\nSource') print(' Word Ids: {}'.format([i for i in text])) print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text]))) print('\nTarget') print(' Word Ids: {}'.format([i for i in answer_logits if i != pad])) print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad]))) ```
github_jupyter
``` !pip install keras from keras.models import Model from keras.optimizers import SGD,Adam,RMSprop # from keras.layers import Dense, Input, LSTM, Embedding,Dropout,Bidirectional,Flatten from keras.layers import * import os # from __future__ import print_function from keras import backend as K from keras.engine.topology import Layer import h5py from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report, accuracy_score import pandas as pd import numpy as np import keras # Position_Embedding #! -*- coding: utf-8 -*- #%% class Position_Embedding(Layer): def __init__(self, size=None, mode='sum', **kwargs): self.size = size #必须为偶数 self.mode = mode super(Position_Embedding, self).__init__(**kwargs) def call(self, x): if (self.size == None) or (self.mode == 'sum'): self.size = int(x.shape[-1]) batch_size,seq_len = K.shape(x)[0],K.shape(x)[1] position_j = 1. / K.pow(10000., \ 2 * K.arange(self.size / 2, dtype='float32' \ ) / self.size) position_j = K.expand_dims(position_j, 0) position_i = K.cumsum(K.ones_like(x[:,:,0]), 1)-1 #K.arange不支持变长,只好用这种方法生成 position_i = K.expand_dims(position_i, 2) position_ij = K.dot(position_i, position_j) position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2) if self.mode == 'sum': return position_ij + x elif self.mode == 'concat': return K.concatenate([position_ij, x], 2) def compute_output_shape(self, input_shape): if self.mode == 'sum': return input_shape elif self.mode == 'concat': return (input_shape[0], input_shape[1], input_shape[2]+self.size) # attention class Attention(Layer): def __init__(self, nb_head, size_per_head, **kwargs): self.nb_head = nb_head self.size_per_head = size_per_head self.output_dim = nb_head*size_per_head super(Attention, self).__init__(**kwargs) def build(self, input_shape): self.WQ = self.add_weight(name='WQ', shape=(input_shape[0][-1], self.output_dim), initializer='glorot_uniform', trainable=True) self.WK = 
self.add_weight(name='WK', shape=(input_shape[1][-1], self.output_dim), initializer='glorot_uniform', trainable=True) self.WV = self.add_weight(name='WV', shape=(input_shape[2][-1], self.output_dim), initializer='glorot_uniform', trainable=True) super(Attention, self).build(input_shape) def Mask(self, inputs, seq_len, mode='mul'): if seq_len == None: return inputs else: mask = K.one_hot(seq_len[:,0], K.shape(inputs)[1]) mask = 1 - K.cumsum(mask, 1) for _ in range(len(inputs.shape)-2): mask = K.expand_dims(mask, 2) if mode == 'mul': return inputs * mask if mode == 'add': return inputs - (1 - mask) * 1e12 def call(self, x): #如果只传入Q_seq,K_seq,V_seq,那么就不做Mask #如果同时传入Q_seq,K_seq,V_seq,Q_len,V_len,那么对多余部分做Mask if len(x) == 3: Q_seq,K_seq,V_seq = x Q_len,V_len = None,None elif len(x) == 5: Q_seq,K_seq,V_seq,Q_len,V_len = x #对Q、K、V做线性变换 Q_seq = K.dot(Q_seq, self.WQ) Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head)) Q_seq = K.permute_dimensions(Q_seq, (0,2,1,3)) K_seq = K.dot(K_seq, self.WK) K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head)) K_seq = K.permute_dimensions(K_seq, (0,2,1,3)) V_seq = K.dot(V_seq, self.WV) V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head)) V_seq = K.permute_dimensions(V_seq, (0,2,1,3)) #计算内积,然后mask,然后softmax A = K.batch_dot(Q_seq, K_seq, axes=[3,3]) / self.size_per_head**0.5 A = K.permute_dimensions(A, (0,3,2,1)) A = self.Mask(A, V_len, 'add') A = K.permute_dimensions(A, (0,3,2,1)) A = K.softmax(A) #输出并mask O_seq = K.batch_dot(A, V_seq, axes=[3,2]) O_seq = K.permute_dimensions(O_seq, (0,2,1,3)) O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim)) O_seq = self.Mask(O_seq, Q_len, 'mul') return O_seq def compute_output_shape(self, input_shape): return (input_shape[0][0], input_shape[0][1], self.output_dim) def buid_model(): # LSTM 模型 print('lstm model start...\n') # 标题输入:接收一个含有 200 个整数的序列,每个整数在 1 到 3812202 之间。 main_input1 = 
Input(shape=(200,), name='main_input1', dtype='int32') emb1 = Embedding(output_dim=16, input_dim=3812203, input_length=200,mask_zero = False)(main_input1) emb1 = Position_Embedding()(emb1) main_input2 = Input(shape=(200,), name='main_input2', dtype='int32') emb2 = Embedding(output_dim=16, input_dim=62966, input_length=200,mask_zero = False)(main_input2) emb2 = Position_Embedding()(emb2) main_input3 = Input(shape=(200,), name='main_input3', dtype='int32') emb3 = Embedding(output_dim=16, input_dim=4445721, input_length=200,mask_zero = False)(main_input3) emb3 = Position_Embedding()(emb3) emb = keras.layers.concatenate([emb1, emb2, emb3]) O_seq = Attention(8,16)([emb,emb,emb]) O_seq = GlobalAveragePooling1D()(O_seq) # O_seq = Dropout(0.5)(O_seq)#尽量不要用 # lstm_out = Bidirectional(LSTM(10,activation='softsign',return_sequences=True))(O_seq) # lstm_out = GlobalAveragePooling1D()(lstm_out) outputs = Dense(10, activation='softmax', name='main_output')(O_seq) # model = Model(inputs=S_inputs, outputs=outputs) # # try using different optimizers and different optimizer configs # opt = Adam(lr=0.0005) # loss = 'categorical_crossentropy' # model.compile(loss=loss, # optimizer=opt, # metrics=['accuracy']) # 定义一个具有两个输入输出的模型 model = keras.models.Model(inputs=[main_input1,main_input2,main_input3],#,auxiliary_input], outputs=[outputs]) # 这里的输入输出顺序与fit时一致就好 # opt = RMSprop(lr=0.01, clipnorm=1.0) opt = Adam(lr=0.01) model.compile(optimizer=opt, sample_weight_mode='None',#"temporal", loss={'main_output': 'categorical_crossentropy'}, metrics=['accuracy']) print(model.summary()) return model def data_load(): print('loading data ... 
\n') with h5py.File('../lstm_model_ad_id/word_train_ad.h5', 'r') as f: data = np.array(f.get('word_data')) label = pd.read_csv('../../train_preliminary/user.csv').sort_values(by=['user_id']) train_x, test_x, train_y, test_y = train_test_split(data, label, test_size=0.2, random_state=2020) train_y_age = train_y['age'].values - 1 train_y_age = keras.utils.np_utils.to_categorical(train_y_age, num_classes=10) train_y_gender = train_y['gender'].values - 1 test_y_age = test_y['age'].values - 1 test_y_age = keras.utils.np_utils.to_categorical(test_y_age, num_classes=10) test_y_gender = test_y['gender'].values - 1 print('get data ... \n') return train_x, test_x, train_y_age, train_y_gender,test_y_age,test_y_gender def load_data2(): with h5py.File('../lstm_model_advertiser_id/word_train_advertiser_id.h5', 'r') as f: data = np.array(f.get('word_data')) train_x, test_x= train_test_split(data, test_size=0.2, random_state=2020) return train_x, test_x def load_data3(): with h5py.File('../lstm_model_creative_id/word_train_creative_id.h5', 'r') as f: data = np.array(f.get('word_data')) train_x, test_x= train_test_split(data, test_size=0.2, random_state=2020) return train_x, test_x def get_filename_for_saving(save_dir): return os.path.join(save_dir, "trainsform_comb_age_adm_0.01_{val_loss:.3f}-{val_acc:.3f}-{epoch:03d}-{loss:.3f}-{acc:.3f}.hdf5") model = buid_model() print('lstm model geted...\n') print(model.summary()) train_x, test_x, train_y_age, train_y_gender,test_y_age,test_y_gender = data_load() train_x2, test_x2 = load_data2() train_x3, test_x3 = load_data3() print('lstm model fit...\n') checkpointer = keras.callbacks.ModelCheckpoint( filepath=get_filename_for_saving(''), save_best_only=False) stopping = keras.callbacks.EarlyStopping(patience=8) reduce_lr = keras.callbacks.ReduceLROnPlateau(factor=0.1, patience=2, min_lr=0.0001) model.fit({'main_input1': train_x ,'main_input2': train_x2,'main_input3': train_x3}, {'main_output': train_y_age}, epochs=100, batch_size=256, 
validation_data=({'main_input1': test_x,'main_input2': test_x2,'main_input3': test_x3}, {'main_output': test_y_age}), callbacks=[checkpointer, reduce_lr, stopping]) # pre = model.predict(test_x,verbose=1) # #评估结果 # from sklearn.metrics import confusion_matrix, classification_report # y_ = np.reshape(np.argmax(test_y,axis=1),[-1]) # pre_ = np.reshape(np.argmax(pre, axis=1),[-1]) # #每个类的各项指标 # cm = confusion_matrix(y_, pre_) # # np.set_printoptions(precision=3) # cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # print(cm_normalized) # print(classification_report(y_, pre_)) ```
github_jupyter
# High-Dimensional Bayesian Optimization with SAASBO This tutorial shows how to use the Sparse Axis-Aligned Subspace Bayesian Optimization (SAASBO) method for high-dimensional Bayesian optimization [1]. SAASBO uses sparse axis-aligned subspace priors to avoid overfitting. Specifically, SAASBO uses a hierarchical sparsity prior consisting of a global shrinkage parameter with a Half-Cauchy prior $\tau \sim \mathcal{HC}(\beta)$ and inverse lengthscales $\rho_d \sim \mathcal{HC}(\tau)$ for $d=1, ..., D$. See [1] for details. [1] D. Eriksson, M. Jankowiak. High-Dimensional Bayesian Optimization with Sparse Axis-Aligned Subspaces. Proceedings of the Thirty-Seventh Conference on Uncertainty in Artificial Intelligence, 2021. ``` from ax import ParameterType, RangeParameter, SearchSpace, SimpleExperiment from ax.benchmark.benchmark import full_benchmark_run from ax.benchmark.benchmark_result import aggregate_problem_results, BenchmarkResult from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy from ax.modelbridge.registry import Models import torch tkwargs = { "dtype": torch.double, "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"), } ``` ## Setup methods ``` gpei = GenerationStrategy( steps=[ GenerationStep(model=Models.SOBOL, num_trials=10), GenerationStep( model=Models.BOTORCH, num_trials=-1, ), ], name="Sobol+GPEI" ) saasbo = GenerationStrategy( steps=[ GenerationStep(model=Models.SOBOL, num_trials=10), GenerationStep( model=Models.FULLYBAYESIAN, num_trials=-1, model_kwargs={ "num_samples": 256, "warmup_steps": 512, "disable_progbar": True, "torch_device": tkwargs["device"], "torch_dtype": tkwargs["dtype"], }, ), ], name="SAASBO" ) ``` ## Setup search space and metric In this simple experiment we use the Branin function embedded in a 30-dimensional space. 
``` from ax.core.objective import Objective from ax.core.optimization_config import OptimizationConfig from ax.metrics.branin import BraninMetric from ax.benchmark.benchmark_problem import BenchmarkProblem branin_30 = BenchmarkProblem( name="Branin, D=30", optimal_value=0.397887, optimization_config=OptimizationConfig( objective=Objective( metric=BraninMetric( name="objective", param_names=["x9", "x24"], noise_sd=0.0 ), minimize=True, ) ), search_space=SearchSpace( parameters=[ RangeParameter( name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0 ) for i in range(15) ] + [ RangeParameter( name=f"x{i + 15}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=15.0, ) for i in range(15) ] ), evaluate_suggested=False, ) ``` ## Run benchmark ``` benchmarking_experiments = full_benchmark_run( problem_groups={"default": [branin_30]}, method_groups={"default": [gpei, saasbo]}, num_replications=1, num_trials=30, batch_size=1, raise_all_exceptions=True, verbose_logging=True ) ``` ## Aggregate results ``` res = aggregate_problem_results(benchmarking_experiments[branin_30.name], problem=branin_30) res_gp_ei = res.true_performance['Sobol+GPEI'].ravel() res_saasbo = res.true_performance["SAASBO"].ravel() ``` ## Plot results ``` import matplotlib import matplotlib.pyplot as plt import numpy as np %matplotlib inline matplotlib.rcParams.update({"font.size": 16}) fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(np.minimum.accumulate(res_saasbo), color="b", label="SAASBO") ax.plot(np.minimum.accumulate(res_gp_ei), color="r", label="GP-EI") ax.plot([0, len(res_saasbo)], [res.optimum, res.optimum], "--", c="g", lw=3, label="Optimal value") ax.grid(True) ax.set_title("Branin, D=30", fontsize=20) ax.set_xlabel("Number of evaluations", fontsize=20) ax.set_xlim([0, len(res_saasbo)]) ax.set_ylabel("Best value found", fontsize=20) ax.set_ylim([0, 8]) ax.legend(fontsize=18) plt.show() ```
github_jupyter
# Writing a training loop from scratch **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2019/03/01<br> **Last modified:** 2020/04/15<br> **Description:** Complete guide to writing low-level training & evaluation loops. ## Setup ``` import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import numpy as np ``` ## Introduction Keras provides default training and evaluation loops, `fit()` and `evaluate()`. Their usage is coverered in the guide [Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/). If you want to customize the learning algorithm of your model while still leveraging the convenience of `fit()` (for instance, to train a GAN using `fit()`), you can subclass the `Model` class and implement your own `train_step()` method, which is called repeatedly during `fit()`. This is covered in the guide [Customizing what happens in `fit()`](/guides/customizing_what_happens_in_fit/). Now, if you want very low-level control over training & evaluation, you should write your own training & evaluation loops from scratch. This is what this guide is about. ## Using the `GradientTape`: a first end-to-end example Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of the trainable weights of the layer with respect to a loss value. Using an optimizer instance, you can use these gradients to update these variables (which you can retrieve using `model.trainable_weights`). Let's consider a simple MNIST model: ``` inputs = keras.Input(shape=(784,), name="digits") x1 = layers.Dense(64, activation="relu")(inputs) x2 = layers.Dense(64, activation="relu")(x1) outputs = layers.Dense(10, name="predictions")(x2) model = keras.Model(inputs=inputs, outputs=outputs) ``` Let's train it using mini-batch gradient with a custom training loop. First, we're going to need an optimizer, a loss function, and a dataset: ``` # Instantiate an optimizer. 
optimizer = keras.optimizers.SGD(learning_rate=1e-3) # Instantiate a loss function. loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Prepare the training dataset. batch_size = 64 (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = np.reshape(x_train, (-1, 784)) x_test = np.reshape(x_test, (-1, 784)) train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size) ``` Here's our training loop: - We open a `for` loop that iterates over epochs - For each epoch, we open a `for` loop that iterates over the dataset, in batches - For each batch, we open a `GradientTape()` scope - Inside this scope, we call the model (forward pass) and compute the loss - Outside the scope, we retrieve the gradients of the weights of the model with regard to the loss - Finally, we use the optimizer to update the weights of the model based on the gradients ``` epochs = 2 for epoch in range(epochs): print("\nStart of epoch %d" % (epoch,)) # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): # Open a GradientTape to record the operations run # during the forward pass, which enables autodifferentiation. with tf.GradientTape() as tape: # Run the forward pass of the layer. # The operations that the layer applies # to its inputs are going to be recorded # on the GradientTape. logits = model(x_batch_train, training=True) # Logits for this minibatch # Compute the loss value for this minibatch. loss_value = loss_fn(y_batch_train, logits) # Use the gradient tape to automatically retrieve # the gradients of the trainable variables with respect to the loss. grads = tape.gradient(loss_value, model.trainable_weights) # Run one step of gradient descent by updating # the value of the variables to minimize the loss. optimizer.apply_gradients(zip(grads, model.trainable_weights)) # Log every 200 batches. 
if step % 200 == 0: print( "Training loss (for one batch) at step %d: %.4f" % (step, float(loss_value)) ) print("Seen so far: %s samples" % ((step + 1) * 64)) ``` ## Low-level handling of metrics Let's add metrics monitoring to this basic loop. You can readily reuse the built-in metrics (or custom ones you wrote) in such training loops written from scratch. Here's the flow: - Instantiate the metric at the start of the loop - Call `metric.update_state()` after each batch - Call `metric.result()` when you need to display the current value of the metric - Call `metric.reset_states()` when you need to clear the state of the metric (typically at the end of an epoch) Let's use this knowledge to compute `SparseCategoricalAccuracy` on validation data at the end of each epoch: ``` # Get model inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu", name="dense_1")(inputs) x = layers.Dense(64, activation="relu", name="dense_2")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) # Instantiate an optimizer to train the model. optimizer = keras.optimizers.SGD(learning_rate=1e-3) # Instantiate a loss function. loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Prepare the metrics. train_acc_metric = keras.metrics.SparseCategoricalAccuracy() val_acc_metric = keras.metrics.SparseCategoricalAccuracy() # Prepare the training dataset. batch_size = 64 train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size) # Prepare the validation dataset. # Reserve 10,000 samples for validation. 
x_val = x_train[-10000:] y_val = y_train[-10000:] x_train = x_train[:-10000] y_train = y_train[:-10000] val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_dataset = val_dataset.batch(64) ``` Here's our training & evaluation loop: ``` import time epochs = 2 for epoch in range(epochs): print("\nStart of epoch %d" % (epoch,)) start_time = time.time() # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): with tf.GradientTape() as tape: logits = model(x_batch_train, training=True) loss_value = loss_fn(y_batch_train, logits) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights)) # Update training metric. train_acc_metric.update_state(y_batch_train, logits) # Log every 200 batches. if step % 200 == 0: print( "Training loss (for one batch) at step %d: %.4f" % (step, float(loss_value)) ) print("Seen so far: %d samples" % ((step + 1) * 64)) # Display metrics at the end of each epoch. train_acc = train_acc_metric.result() print("Training acc over epoch: %.4f" % (float(train_acc),)) # Reset training metrics at the end of each epoch train_acc_metric.reset_states() # Run a validation loop at the end of each epoch. for x_batch_val, y_batch_val in val_dataset: val_logits = model(x_batch_val, training=False) # Update val metrics val_acc_metric.update_state(y_batch_val, val_logits) val_acc = val_acc_metric.result() val_acc_metric.reset_states() print("Validation acc: %.4f" % (float(val_acc),)) print("Time taken: %.2fs" % (time.time() - start_time)) ``` ## Speeding-up your training step with `tf.function` The default runtime in TensorFlow 2.0 is [eager execution](https://www.tensorflow.org/guide/eager). As such, our training loop above executes eagerly. This is great for debugging, but graph compilation has a definite performance advantage. 
Describing your computation as a static graph enables the framework to apply global performance optimizations. This is impossible when the framework is constrained to greedily execute one operation after another, with no knowledge of what comes next. You can compile into a static graph any function that takes tensors as input.
## Low-level handling of losses tracked by the model Layers & models recursively track any losses created during the forward pass by layers that call `self.add_loss(value)`. The resulting list of scalar loss values are available via the property `model.losses` at the end of the forward pass. If you want to be using these loss components, you should sum them and add them to the main loss in your training step. Consider this layer, that creates an activity regularization loss: ``` class ActivityRegularizationLayer(layers.Layer): def call(self, inputs): self.add_loss(1e-2 * tf.reduce_sum(inputs)) return inputs ``` Let's build a really simple model that uses it: ``` inputs = keras.Input(shape=(784,), name="digits") x = layers.Dense(64, activation="relu")(inputs) # Insert activity regularization as a layer x = ActivityRegularizationLayer()(x) x = layers.Dense(64, activation="relu")(x) outputs = layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) ``` Here's what our training step should look like now: ``` @tf.function def train_step(x, y): with tf.GradientTape() as tape: logits = model(x, training=True) loss_value = loss_fn(y, logits) # Add any extra losses created during the forward pass. loss_value += sum(model.losses) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights)) train_acc_metric.update_state(y, logits) return loss_value ``` ## Summary Now you know everything there is to know about using built-in training loops and writing your own from scratch. To conclude, here's a simple end-to-end example that ties together everything you've learned in this guide: a DCGAN trained on MNIST digits. ## End-to-end example: a GAN training loop from scratch You may be familiar with Generative Adversarial Networks (GANs). 
GANs can generate new images that look almost real, by learning the latent distribution of a training dataset of images (the "latent space" of the images). A GAN is made of two parts: a "generator" model that maps points in the latent space to points in image space, an a "discriminator" model, a classifier that can tell the difference between real imagees (from the training dataset) and fake images (the output of the generator network). A GAN training loop looks like this: 1) Train the discriminator. - Sample a batch of random points in the latent space. - Turn the points into fake images via the "generator" model. - Get a batch of real images and combine them with the generated images. - Train the "discriminator" model to classify generated vs. real images. 2) Train the generator. - Sample random points in the latent space. - Turn the points into fake images via the "generator" network. - Get a batch of real images and combine them with the generated images. - Train the "generator" model to "fool" the discriminator and classify the fake images as real. For a much more detailed overview of how GANs works, see [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). Let's implement this training loop. 
First, create the discriminator meant to classify fake vs real digits: ``` discriminator = keras.Sequential( [ keras.Input(shape=(28, 28, 1)), layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.GlobalMaxPooling2D(), layers.Dense(1), ], name="discriminator", ) discriminator.summary() ``` Then let's create a generator network, that turns latent vectors into outputs of shape `(28, 28, 1)` (representing MNIST digits): ``` latent_dim = 128 generator = keras.Sequential( [ keras.Input(shape=(latent_dim,)), # We want to generate 128 coefficients to reshape into a 7x7x128 map layers.Dense(7 * 7 * 128), layers.LeakyReLU(alpha=0.2), layers.Reshape((7, 7, 128)), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"), ], name="generator", ) ``` Here's the key bit: the training loop. As you can see it is quite straightforward. The training step function only takes 17 lines. ``` # Instantiate one optimizer for the discriminator and another for the generator. d_optimizer = keras.optimizers.Adam(learning_rate=0.0003) g_optimizer = keras.optimizers.Adam(learning_rate=0.0004) # Instantiate a loss function. 
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) @tf.function def train_step(real_images): # Sample random points in the latent space random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim)) # Decode them to fake images generated_images = generator(random_latent_vectors) # Combine them with real images combined_images = tf.concat([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0 ) # Add random noise to the labels - important trick! labels += 0.05 * tf.random.uniform(labels.shape) # Train the discriminator with tf.GradientTape() as tape: predictions = discriminator(combined_images) d_loss = loss_fn(labels, predictions) grads = tape.gradient(d_loss, discriminator.trainable_weights) d_optimizer.apply_gradients(zip(grads, discriminator.trainable_weights)) # Sample random points in the latent space random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim)) # Assemble labels that say "all real images" misleading_labels = tf.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! with tf.GradientTape() as tape: predictions = discriminator(generator(random_latent_vectors)) g_loss = loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, generator.trainable_weights) g_optimizer.apply_gradients(zip(grads, generator.trainable_weights)) return d_loss, g_loss, generated_images ``` Let's train our GAN, by repeatedly calling `train_step` on batches of images. Since our discriminator and generator are convnets, you're going to want to run this code on a GPU. ``` import os # Prepare the dataset. We use both the training & test MNIST digits. 
batch_size = 64 (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() all_digits = np.concatenate([x_train, x_test]) all_digits = all_digits.astype("float32") / 255.0 all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) dataset = tf.data.Dataset.from_tensor_slices(all_digits) dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) epochs = 1 # In practice you need at least 20 epochs to generate nice digits. save_dir = "./" for epoch in range(epochs): print("\nStart epoch", epoch) for step, real_images in enumerate(dataset): # Train the discriminator & generator on one batch of real images. d_loss, g_loss, generated_images = train_step(real_images) # Logging. if step % 200 == 0: # Print metrics print("discriminator loss at step %d: %.2f" % (step, d_loss)) print("adversarial loss at step %d: %.2f" % (step, g_loss)) # Save one generated image img = tf.keras.preprocessing.image.array_to_img( generated_images[0] * 255.0, scale=False ) img.save(os.path.join(save_dir, "generated_img" + str(step) + ".png")) # To limit execution time we stop after 10 steps. # Remove the lines below to actually train the model! if step > 10: break ``` That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the Colab GPU.
github_jupyter
# MNIST Classification In this lesson we discuss in how to create a simple IPython Notebook to solve an image classification problem. MNIST contains a set of pictures ## Import Libraries Note: https://python-future.org/quickstart.html ``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.utils import to_categorical, plot_model from keras.datasets import mnist ``` ## Warm Up Exercise ## Pre-process data ### Load data First we load the data from the inbuilt mnist dataset from Keras Here we have to split the data set into training and testing data. The training data or testing data has two components. Training features and training labels. For instance every sample in the dataset has a corresponding label. In Mnist the training sample contains image data represented in terms of an array. The training labels are from 0-9. Here we say x_train for training data features and y_train as the training labels. Same goes for testing data. ``` (x_train, y_train), (x_test, y_test) = mnist.load_data() ``` ### Identify Number of Classes As this is a number classification problem. We need to know how many classes are there. So we'll count the number of unique labels. ``` num_labels = len(np.unique(y_train)) num_labels ``` ### Convert Labels To One-Hot Vector Read more on one-hot vector. ``` y_train = to_categorical(y_train) y_test = to_categorical(y_test) ``` ## Image Reshaping The training model is designed by considering the data as a vector. This is a model dependent modification. Here we assume the image is a squared shape image. ``` image_size = x_train.shape[1] input_size = image_size * image_size ``` ## Resize and Normalize The next step is to continue the reshaping to a fit into a vector and normalize the data. 
Image values are from 0 - 255, so an easy way to normalize is to divide by the maximum value. ``` x_train = np.reshape(x_train, [-1, input_size]) x_train = x_train.astype('float32') / 255 x_test = np.reshape(x_test, [-1, input_size]) x_test = x_test.astype('float32') / 255 ``` ## Create a Keras Model Keras is a neural network library. The summary function provides tabular summary on the model you created. And the plot_model function provides a grpah on the network you created. ``` # Create Model # network parameters batch_size = 128 hidden_units = 64 model = Sequential() model.add(Dense(hidden_units, input_dim=input_size)) model.add(Dense(num_labels)) model.add(Activation('softmax')) model.summary() plot_model(model, to_file='mlp-mnist.png', show_shapes=True) ``` ## Compile and Train A keras model need to be compiled before it can be used to train the model. In the compile function, you can provide the optimization that you want to add, metrics you expect and the type of loss function you need to use. Here we use adam optimizer, a famous optimizer used in neural networks. The loss funtion we have used is the categorical_crossentropy. Once the model is compiled, then the fit function is called upon passing the number of epochs, traing data and batch size. The batch size determines the number of elements used per minibatch in optimizing the function. **Note: Change the number of epochs, batch size and see what happens.** ``` model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(x_train, y_train, epochs=20, batch_size=batch_size) ``` ## Testing Now we can test the trained model. Use the evaluate function by passing test data and batch size and the accuracy and the loss value can be retrieved. **MNIST_V1.0|Exercise: Try to observe the network behavior by changing the number of epochs, batch size and record the best accuracy that you can gain. Here you can record what happens when you change these values. 
Describe your observations in 50-100 words.** ``` loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size) print("\nTest accuracy: %.1f%%" % (100.0 * acc)) ``` ## Final Note This programme can be defined as a hello world programme in deep learning. Objective of this exercise is not to teach you the depths of deep learning. But to teach you basic concepts that may need to design a simple network to solve a problem. Before running the whole code, read all the instructions before a code section. ## Homework **Solve Exercise MNIST_V1.0.** ### Reference: [Orignal Source to Source Code](https://github.com/PacktPublishing/Advanced-Deep-Learning-with-Keras)
github_jupyter
``` import pandas as pd import numpy as np import pickle import json import seaborn as sns import os import tqdm import matplotlib.pyplot as plt import re from collections import Counter import sys sys.path.insert(0,'../') from utils.reading_utils import * from utils.embeddings_utils import * from utils.processing_utils import * sns.set() ``` # Data loading ``` ABS_PATH = '../data/cornell/cornell movie-dialogs corpus' ``` # Processing movie_lines.txt ``` movie_lines = read_chameleons(os.path.join(ABS_PATH,'movie_lines.txt'), ['lineID', 'charID', 'movieID','char_name', 'utter']) movie_lines.head() movie_lines['utter'] = movie_lines['utter'].apply(cut_punctuation) movie_lines['utter'] = movie_lines['utter'].apply(clean_bad_chars) word_counts = Counter(np.hstack(movie_lines['utter'].apply(lambda x: x.split(' ')).values)) len(word_counts) token_counts = Counter(np.hstack(movie_lines['utter'].apply(lambda x: x.split(' ')))) movie_lines['utter'] = movie_lines['utter'].apply(lambda x: x.lower()) movie_lines['utter'] = movie_lines['utter'].apply(uncover_reduction) movie_lines['utter'] word_counts = Counter(np.hstack(movie_lines['utter'].apply(lambda x: x.split(' ')).values)) len(word_counts) top_50_tokens, counts = zip(*sorted(word_counts.items(), key=lambda x: x[1])[::-1][:50]) plt.figure(figsize=(12,12)) plt.barh(range(len(top_50_tokens)), counts) plt.yticks(range(len(top_50_tokens)),top_50_tokens); (movie_lines['utter']=='').astype(int).sum() movie_lines = movie_lines[movie_lines['utter'].apply(lambda x: True if x else False)] (movie_lines['utter']=='').astype(int).sum() ``` # Movie conversations loading and processing ``` movie_conversations = read_chameleons(os.path.join(ABS_PATH,'movie_conversations.txt'), columns = ['charID_1', 'charID_2', 'movieID', 'conversation']) movie_conversations.head() movie_conversations['conversation'] = movie_conversations['conversation'].apply(lambda x: json.loads(x.replace("'",'"'))) utters = 
movie_lines[['lineID','utter']].set_index('lineID').to_dict('dict')['utter'] movie_conversations['conversation'] = movie_conversations['conversation'].apply(lambda x : [utters.get(i) for i in x if utters.get(i)]) conversations = movie_conversations['conversation'].values conversations[:2] import gc del utters, movie_conversations, movie_lines; gc.collect(); ``` # Processing of conversations and mapping creation ``` unique_tokens = Counter(np.hstack([i.split(' ') for i in np.hstack(conversations)])) max_tokens = 30000 rare_tokens, _ = zip(*sorted(unique_tokens.items(), key=lambda x: x[1])[::-1][-(len(unique_tokens)-max_tokens):]) len(unique_tokens), len(rare_tokens) unk_token = '<unk>' unk_dict = dict((i,unk_token) for i in rare_tokens) conversations = replace_in_conversations(conversations, unk_dict) lengths_dialogues = list(map(len,conversations)) Counter(lengths_dialogues) conversations = [i for i in conversations if len(i)>1] unique_tokens = np.unique(np.hstack([i.split(' ') for i in np.hstack(conversations)])) pad_token = '<pad>' start_token = '<start>' end_token = '<end>' token_mapping = {pad_token:0, start_token:1, end_token:2, unk_token:3} start_point = 3 for token in unique_tokens: if not token_mapping.get(token): start_point+=1 token_mapping.update({token:start_point}) len(token_mapping) ``` # Creation of embeddings matrix ``` import gensim import tqdm data_path = '../processed_data' if not os.path.exists(data_path): os.mkdir(data_path) ``` ## w2v embeddings ``` embeddings = load_emb_from_disk('../embeddings/GoogleNews-vectors-negative300.bin') tokens_embs, missing = get_emb_rep(list(token_mapping.keys()), embeddings) final_embeddings = create_embeddings(tokens_embs, token_mapping, pad_token) with open(os.path.join(data_path,'w2v_embeddings_cornell.npy'), 'wb') as f: np.save(f, final_embeddings) with open(os.path.join(data_path,'missing_in_w2v_cornell.npy'), 'wb') as f: np.save(f, missing) final_embeddings.shape import gc; del final_embeddings 
gc.collect(); del embeddings; gc.collect(); ``` ## glove embeddings ``` glove_vectors = load_glove('../embeddings/glove.6B.100d.txt') len(glove_vectors) tokens_embs, missing = get_emb_rep_glove(list(token_mapping.keys()), glove_vectors) final_embeddings = create_embeddings(tokens_embs, token_mapping, pad_token) with open(os.path.join(data_path,'glove_embeddings_cornell.npy'), 'wb') as f: np.save(f, final_embeddings) with open(os.path.join(data_path,'missing_in_glove_cornell.npy'), 'wb') as f: np.save(f, missing) del final_embeddings; gc.collect(); ``` ## Training custom glove embeddings ``` glove_input = np.hstack(conversations) glove_input = [i.split(' ') for i in glove_input] glove_input[:1] from glove import Glove, Corpus corpus = Corpus() corpus.fit(glove_input) glove = Glove(no_components=100, learning_rate=0.05) glove.fit(corpus.matrix, epochs=100, no_threads=6, verbose=True) glove.add_dictionary(corpus.dictionary) glove.most_similar('hello') glove.most_similar('fuck') glove.most_similar('usa') glove.most_similar('jack') tokens = list(glove.dictionary.keys()) print(len(tokens)) glove_vectors = dict([(tokens[idx],vector) for idx, vector in enumerate(glove.word_vectors)]) tokens_embs, missing = get_emb_rep_glove(list(token_mapping.keys()), glove_vectors) final_embeddings = create_embeddings(tokens_embs, token_mapping, pad_token) with open(os.path.join(data_path,'glove_embeddings_trained_cornell.npy'), 'wb') as f: np.save(f, final_embeddings) with open(os.path.join(data_path,'missing_in_glove_trained_cornell.npy'), 'wb') as f: np.save(f, missing) ``` ## saving out ``` with open(os.path.join(data_path,'token_mapping_cornell.json'), 'w') as f: json.dump(token_mapping, f) with open(os.path.join(data_path,'data_cornell.npy'), 'wb') as f: np.save(f, conversations) ```
github_jupyter
___ <a href='https://www.learntocodeonline.com/'> <img src='files/IMGs/learn to code online.png' /></a> ___ # Creating A Workspace A workspace is a directory on your machine that will host all of the files required for your project. **Organization** is what determines a good from great developer. He always creates his workspace in the home directory: `workspace` _Personally, I like to keep mine short due to Windows restrictions, so I have one called **code**_. Keeping your project folder in a highler lever has many benefits, including ensuring there are less likely to be character limits. You can then create a shortcut to it in the **Quick Access** section. Within the *workspace* folder, create a directory for the project you will be working on. For the intent and purpose of this training, it should be titled: `profiles-rest-api` Recommended to NOT use spaces or capitalization since we will be using **git bash** to navigate. It is also recommended to put it in the home, however if you [create a symbolic link](https://skimfeed.com/blog/symbolic-links-in-windows-for-pointing-a-folder-to-another-folder-on-an-external-hard-drive-or-ssd/) it won't matter where you put it. You will still be able to follow these steps exactly. ## Navigate To Project Folder With Atom Open the directory in Atom. 1. Open the program 2. Close all tabs 3. File > Add Project Folder > select the project folder in your workspace Nifty feature of Atom is that you can have multiple projects available at once! ## How To Navigate To Project Folder With Git Bash 1. open Windows menu and search for **git bash** - then open the program This is basically a replacement for the command prompt window, but gives a lot of linux online tools needed in this course. Also a lot easier to work with git projects. 2. Use **ls** to list all files and folders in current location 3. 
Use the **cd** (change directory) command to navigate to the project folder: - `cd workspace` - `cd profiles-rest-api` You can also auto-complete your line if you press the **TAB** key. Goofed up? You can move up a folder by typing: `cd..` TIP: `pwd` should show current location. # Creating A Git Project Git is a useful tools that allows you to track all changes to your project. Take note of the following cheat sheets: - [GitHub Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) - [GitHub Python.gitignore](https://gist.github.com/LondonAppDev/66c3291e4f487ac92fcc96735e44c35e) ## ReadMe File This file is created first so that explains what the project is called and what it is about. This will be a markdown file, so the filename will look like this: `README.md` Markdown is a typical markup language for readme files - it is an adopted standard. ## GitIgnore File Every time you do a change in git, you must do a commit - or a snapshot of what your code is currently. This allows you to compare to previous snapshot so you can see all of the changes in every file. The **git ignore** file tells git what files do NOT need to be added to git. Other files are created as a byproduct of source code that we do not want to commit to version control - such as things created when new developers run the code. 1. Right click on project folder > New File 2. Call it: `.gitignore` (and yes, a period at the front) Depending on what language you're using, your gitignore file might look different. Since we're using python, you'll want to copy the entire contents of [GitHub Python.gitignore](https://gist.github.com/LondonAppDev/66c3291e4f487ac92fcc96735e44c35e) to your file. <img src='files/IMGs/git/git-02a.png'> # Initialize Git Repository 1. Locate the **git bash** desktop app. 2. Change directory into your project folder. `cd workspace/profiles-rest-api` 3. Enter the command: `git init` This initializes our project as an empty git repo. 
## Create README.md File The uprpose of the README is so if anyone finds the project or you pick it up later and may have forgotten what it was, this file can explain what it is and how to use it. This will now show up as a green file - meaning something is new since last commit. <img src='files/IMGs/git/git-03.png'> ## Create .gitignore File This tells git a list of files or directories to exclude. You can find an example [here](https://github.com/LondonAppDev/course-rest-api/blob/master/.gitignore), which is standard for python-vagrant projects. ## Create License File Since pushing to Github, it is best practice to include a license with your project. This tells others what they are allowed to do with this code (e.g. reuse) as well as to protect yourself so people can't sue if something goes wrong. (In the license you say there is no warranty or liability for use of the code.) You can view an example [here](https://github.com/LondonAppDev/course-rest-api/blob/master/LICENSE). # Add Files To Repo Every time you wish to add a file to the repo you must run: `git add FILENAME` If you wish to add all files, run: `git add .` # Commit Changes Every time you make a change to files in your repo, you want to commit the changes to git by using the following command:<br> `git commit -a -m "description of change since last commit"` <img src='files/IMGs/git/git-01.png'> # Pushing To Github Repo ## Create Public.Private Key Pair In your **git bash** window: `ls ~/.ssh` This is to determine if we already have existing keys in SSH directory. By default, this is where all public/private SSH keys are created ont he system. If no keys, you will create a new one by typing: `SSH-keyget -t rsa -b 4096 -C "EMAIL ADDRESS HERE"` This runs the SSH keygen tool & tells it we want to create a new key with type RSA that is 4,096 bytes. The rest is a comment so we can identify what key this is for if ever need to check it. 
It will try to save to a default location - you can either press **Enter** or choose a new location. You can also create an optional passphrase for an additional layer of security. Once completed, there will be 2 new files created in the directory.\: - id_rsa (this is private so never share it!) - id_rsa.pub ## Add Public Key To Account Additional info can be found [here](https://docs.github.com/en/authentication/connecting-to-github-with-ssh). ## Create Repo For Project If you haven't already, you will need to create a repo for your project. Then be sure to [set up your streaming](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories). 1. Inside **git bash** within the folder of your project, run: `git remote add origin git@github.com:YOURUSERNAME/PROJECT.git` 2. Run: `git push -u origin main` If this is the first time you've connected on your machine, it will ask if you wish to permanently add the RSA fingerprint from GitHub. You'll want to yes `yes`.
github_jupyter
# Demo of ROCKET transform ## Overview ROCKET [1] transforms time series using random convolutional kernels (random length, weights, bias, dilation, and padding). ROCKET computes two features from the resulting feature maps: the max, and the proportion of positive values (or ppv). The transformed features are used to train a linear classifier. [1] Dempster A, Petitjean F, Webb GI (2019) ROCKET: Exceptionally fast and accurate time series classification using random convolutional kernels. [arXiv:1910.13051](https://arxiv.org/abs/1910.13051) *** ## Contents 1. Imports 2. Univariate Time Series 3. Multivariate Time Series 4. Pipeline Example *** ## 1 Imports Import example data, ROCKET, and a classifier (`RidgeClassifierCV` from scikit-learn), as well as NumPy and `make_pipeline` from scikit-learn. **Note**: ROCKET compiles (via Numba) on import, which may take a few seconds. ``` # !pip install --upgrade numba import numpy as np from sklearn.linear_model import RidgeClassifierCV from sklearn.pipeline import make_pipeline from sktime.datasets import load_arrow_head # univariate dataset from sktime.datasets.base import load_japanese_vowels # multivariate dataset from sktime.transformations.panel.rocket import Rocket ``` ## 2 Univariate Time Series We can transform the data using ROCKET and separately fit a classifier, or we can use ROCKET together with a classifier in a pipeline (section 4, below). ### 2.1 Load the Training Data For more details on the data set, see the [univariate time series classification notebook](https://github.com/alan-turing-institute/sktime/blob/main/examples/02_classification_univariate.ipynb). 
``` X_train, y_train = load_arrow_head(split="train", return_X_y=True) ``` ### 2.2 Initialise ROCKET and Transform the Training Data ``` rocket = Rocket() # by default, ROCKET uses 10,000 kernels rocket.fit(X_train) X_train_transform = rocket.transform(X_train) ``` ### 2.3 Fit a Classifier We recommend using `RidgeClassifierCV` from scikit-learn for smaller datasets (fewer than approx. 20K training examples), and using logistic regression trained using stochastic gradient descent for larger datasets. ``` classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True) classifier.fit(X_train_transform, y_train) ``` ### 2.4 Load and Transform the Test Data ``` X_test, y_test = load_arrow_head(split="test", return_X_y=True) X_test_transform = rocket.transform(X_test) ``` ### 2.5 Classify the Test Data ``` classifier.score(X_test_transform, y_test) ``` *** ## 3 Multivariate Time Series We can use ROCKET in exactly the same way for multivariate time series. ### 3.1 Load the Training Data ``` X_train, y_train = load_japanese_vowels(split="train", return_X_y=True) ``` ### 3.2 Initialise ROCKET and Transform the Training Data ``` rocket = Rocket() rocket.fit(X_train) X_train_transform = rocket.transform(X_train) ``` ### 3.3 Fit a Classifier ``` classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True) classifier.fit(X_train_transform, y_train) ``` ### 3.4 Load and Transform the Test Data ``` X_test, y_test = load_japanese_vowels(split="test", return_X_y=True) X_test_transform = rocket.transform(X_test) ``` ### 3.5 Classify the Test Data ``` classifier.score(X_test_transform, y_test) ``` *** ## 4 Pipeline Example We can use ROCKET together with `RidgeClassifierCV` (or another classifier) in a pipeline. We can then use the pipeline like a self-contained classifier, with a single call to `fit`, and without having to separately transform the data, etc. 
### 4.1 Initialise the Pipeline ``` rocket_pipeline = make_pipeline( Rocket(), RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True) ) ``` ### 4.2 Load and Fit the Training Data ``` X_train, y_train = load_arrow_head(split="train", return_X_y=True) # it is necessary to pass y_train to the pipeline # y_train is not used for the transform, but it is used by the classifier rocket_pipeline.fit(X_train, y_train) ``` ### 4.3 Load and Classify the Test Data ``` X_test, y_test = load_arrow_head(split="test", return_X_y=True) rocket_pipeline.score(X_test, y_test) ```
github_jupyter
# New outbreaks in Germany? ``` import oscovida as osc import pandas as pd pd.options.display.max_rows = 100 %matplotlib inline %config InlineBackend.figure_formats = ['svg'] x = osc.fetch_data_germany(include_last_day=True) def cumulative_cases(x): cases_diff = x.groupby(['Landkreis', 'Meldedatum']).agg('sum')['AnzahlFall'] # cases_diff has a multi-index # sanity check: what is the total number of cases total = cases_diff.sum() # Turn into table with row index being dates, and Landkreise as Columns tmp1 = cases_diff.unstack().T # compute cumulative sum per Landkreis tmp2 = tmp1.cumsum() # replace NaNs from alignment (data not available for all dates) tmp3 = tmp2.fillna(method='pad') # We may have some NaNs left for the earliest dates. Replace by zeros tmp4 = tmp3.fillna(value=0) # Compute sum again. As we have cumulative numbers, we only take the last row: total2 = tmp4.iloc[-1, :].sum() diff = abs(total - total2) if diff > 0: print("diff=", diff) assert diff == 0 # make index a proper datetime object tmp4.index = pd.to_datetime(tmp4.index) return tmp4 y = cumulative_cases(x) #y y.diff().iloc[-1,:].sum() def get_7_day_averages(y): y = y.copy() total = y.iloc[-1,:].sum() print(total) diffs = y.diff() print(diffs.sum().sum()) # diffs2 = diffs.rolling(7, center=True).mean() diffs2 = diffs diffs2 = diffs.rolling(7, center=True).mean() # This introduced new NaNs at the beginning and end. 
Get rid of those len1 = len(diffs2) diffs2.dropna(inplace=True) len2 = len(diffs2) assert len1 - len2 == 7 print(diffs2.sum().sum()) # sample every 7 days, but start from most recent data data = diffs2 print(data.sum().sum()) end_time = data.index[-1] data['time to end'] = end_time - data.index data['Meldedatum'] = data.index data.set_index('time to end', inplace=True) print(data.sum().sum()) data2 = data.resample('1D').asfreq() # Or your function # data2 = data print(data2.sum().sum()*1) data2['datetime'] = end_time - data2.index data2.set_index('datetime', inplace=True) return data2 z = get_7_day_averages(y) z.drop(columns='Meldedatum', inplace=True) z.iloc[1,:].index df.drop('Meldedatum', inplace=True) df = pd.DataFrame() df['last week'] = z.iloc[0, :] # df['prev weeks'] = z.iloc[1:3].mean() df['prev weeks'] = z.iloc[1, :] df['growth'] = df['last week'] / df['prev weeks'] df df.sort_values(by='last week', ascending=False).head(40) df.loc["LK Rhein-Sieg-Kreis"] df.sort_values(by='growth', ascending=False).head(40) df2 = df.drop(df[df['last week'] < 2].index) df2.shape df.shape df2.sort_values(by='growth', ascending=False).head(40) !pwd ```
github_jupyter
# Word prediction based on Quadgram This program doesn't read the corpus line by line so it is faster than the program which reads the corpus line by line ## Import corpus ``` #%%timeit from nltk.util import ngrams from collections import defaultdict import nltk #import corpus file = open('corpusfile.txt','r') content = file.read() ``` # Do preprocessing: ## Tokenize the corpus data ``` #token = nltk.word_tokenize(content) token = content.split() ``` ## Remove the punctuations and lowercase the tokens ``` #%%timeit import string i = 0 for word in token : for l in word : if l in string.punctuation: word = word.replace(l," ") #token[i] = "".join(l for l in word if l not in string.punctuation) #token[i] = word.replace('.','').replace(' ','').replace(',','').replace(':','').replace(';','').replace('!','').replace('?','').replace('(','').replace(')','') token[i] = word.lower() i=i+1 content = " ".join(token) token = content.split() #print(token) ``` ## Make vocabulary list ``` #%%timeit vocab = set(token) ``` ## Tokenize in Trigram ``` #%%timeit trigrams = list(ngrams(token ,3)) #print(trigrams) ``` ## Tokenize the tokens in Quadgram ``` #%%timeit quadgrams = list(ngrams(token,4)) #print(quadgrams) ``` ## Make unique trigrams and make a table with frequency ``` #%%timeit tri_dict = defaultdict(int) #count the frequency of the trigram sentences for t in trigrams: sen = ' '.join(t) if sen not in tri_dict: tri_dict[sen] = 1 else: tri_dict[sen] += 1 #print(tri_dict) #%%timeit quad_dict = defaultdict(int) #count the frequency of the quadgram sentences for q in quadgrams: sen = ' '.join(q) if sen not in quad_dict: quad_dict[sen] = 1 else: quad_dict[sen] += 1 #print(quad_dict) ``` ## Find the probability ``` def findprobability(s,w): c1 = 0 # for count of sentence 's' with word 'w' c2 = 0 # for count of sentence 's' s1 = s + ' ' + w #print(s1) if s1 in quad_dict: c1 = quad_dict[s1] if s in tri_dict: c2 = tri_dict[s] #print(c1,c2) if c2 == 0: return 0 return c1/c2 ``` ## Driver 
function for doing the prediction ``` #%%timeit del token[:] def doPrediction(sen): #remove punctuations and make it lowercase temp_l = sen.split() i = 0 for word in temp_l : for l in word : if l in string.punctuation: word = word.replace(l," ") #token[i] = "".join(l for l in word if l not in string.punctuation) #token[i] = word.replace('.','').replace(' ','').replace(',','').replace(':','').replace(';','').replace('!','').replace('?','').replace('(','').replace(')','') temp_l[i] = word.lower() i=i+1 content = " ".join(temp_l) temp_l = content.split() #print(temp_l) sen = ' '.join(temp_l) #print(sen) max_prob = 0 #when there is no probable word available #now for guessing the word which should exist we use quadgram right_word = 'apple' for word in vocab: prob = findprobability(sen,word) if prob > max_prob: max_prob = prob right_word = word print('Word Prediction is :',right_word) #print('Probability:',max_prob) #print(len(token),',',len(vocab)) sen = input('Enter three words\n') doPrediction(sen) ```
github_jupyter
# Binned Likelihood Tutorial The detection, flux determination, and spectral modeling of Fermi LAT sources is accomplished by a maximum likelihood optimization technique as described in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/) (see also, e.g., [Abdo, A. A. et al. 2009, ApJS, 183, 46](http://adsabs.harvard.edu/abs/2009ApJS..183...46A)). To illustrate how to use the Likelihood software, this tutorial gives a step-by-step description for performing a binned likelihood analysis. ## Binned vs Unbinned Likelihood Binned likelihood analysis is the preferred method for most types of LAT analysis (see [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/)). However, when analyzing data over short time periods (with few events), it is better to use the **unbinned** analysis. To perform an unbinned likelihood analysis, see the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) tutorial. 
**Additional references**: * [SciTools References](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/references.html) * Descriptions of available [Spectral and Spatial Models](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/source_models.html) * Examples of [XML Model Definitions for Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#xmlModelDefinitions): * [Power Law](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#powerlaw) * [Broken Power Law](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#brokenPowerLaw) * [Broken Power Law 2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#powerLaw2) * [Log Parabola](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#logParabola) * [Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#expCutoff) * [BPL Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#bplExpCutoff) * [Gaussian](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#gaussian) * [Constant Value](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#constantValue) * [File Function](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#fileFunction) * [Band Function](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#bandFunction) * [PL Super Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#plSuperExpCutoff) # Prerequisites You will need an **event** data file, a **spacecraft** data file (also referred to as the "pointing and livetime history" file), and the current **background models** (available for [download](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html)). They are also found in code cells below. 
You may choose to select your own data files, or to use the files provided within this tutorial. Custom data sets may be retrieved from the [Lat Data Server](http://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi). # Outline 1. **Make Subselections from the Event Data** Since there is computational overhead for each event associated with each diffuse component, it is useful to filter out any events that are not within the extraction region used for the analysis. 2. **Make Counts Maps from the Event Files** By making simple FITS images, we can inspect our data and pick out obvious sources. 3. **Download the latest diffuse models** The recommended models for a normal point source analysis are `gll_iem_v07.fits` (a very large file) and `iso_P8R3_SOURCE_V2_v1.txt`. All of the background models along with a description of the models are available [here](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html). 4. **Create a Source Model XML File** The source model XML file contains the various sources and their model parameters to be fit using the **gtlike** tool. 5. **Create a 3D Counts Cube** The binned counts cube is used to reduce computation requirements in regions with large numbers of events. 6. **Compute Livetimes** Precomputing the livetime for the dataset speeds up the exposure calculation. 7. **Compute Exposure Cube** This accounts for exposure as a function of energy, based on the cuts made. The exposure map must be recomputed if any change is made to the data selection or binning. 8. **Compute Source Maps** Here the exposure calculation is applied to each of the sources described in the model. 9. **Perform the Likelihood Fit** Fitting the data to the model provides flux, errors, spectral indices, and other information. 10. **Create a Model Map** This can be compared to the counts map to verify the quality of the fit and to make a residual map. # 1. 
Make subselections from the event data For this case we will use two years of LAT Pass 8 data. This is a longer data set than is described in the [Extract LAT Data](../DataSelection/1.ExtractLATData.ipynb) tutorial. >**NOTE**: The ROI used by the binned likelihood analysis is defined by the 3D counts map boundary. The region selection used in the data extraction step, which is conical, must fully contain the 3D counts map spatial boundary, which is square. Selection of data: Search Center (RA, DEC) =(193.98, -5.82) Radius = 15 degrees Start Time (MET) = 239557417 seconds (2008-08-04 T15:43:37) Stop Time (MET) = 302572802 seconds (2010-08-04 T00:00:00) Minimum Energy = 100 MeV Maximum Energy = 500000 MeV This two-year dataset generates numerous data files. We provide the user with the original event data files and the accompanying spacecraft file: * L181126210218F4F0ED2738_PH00.fits (5.0 MB) * L181126210218F4F0ED2738_PH01.fits (10.5 MB) * L181126210218F4F0ED2738_PH02.fits (6.5 MB) * L181126210218F4F0ED2738_PH03.fits (9.2 MB) * L181126210218F4F0ED2738_PH04.fits (7.4 MB) * L181126210218F4F0ED2738_PH05.fits (6.2 MB) * L181126210218F4F0ED2738_PH06.fits (4.5 MB) * L181126210218F4F0ED2738_SC00.fits (256 MB spacecraft file) ``` !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH00.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH01.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH02.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH03.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH04.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH05.fits !wget 
https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH06.fits !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_SC00.fits !mkdir ./data !mv *.fits ./data !ls ./data ``` In order to combine the two events files for your analysis, you must first generate a text file listing the events files to be included. If you do not wish to download all the individual files, you can skip to the next step and retrieve the combined, filtered event file. However, you will need the spacecraft file to complete the analysis, so you should retrieve that now. To generate the file list, type: ``` !ls ./data/*_PH* > ./data/binned_events.txt ``` When analyzing point sources, it is recommended that you include events with high probability of being photons. To do this, you should use **gtselect** to cut on the event class, keeping only the SOURCE class events (event class 128, or as recommended in the Cicerone). In addition, since we do not wish to cut on any of the three event types (conversion type, PSF, or EDISP), we will use `evtype=3` (which corresponds to standard analysis in Pass 7). Note that `INDEF` is the default for evtype in gtselect. ```bash gtselect evclass=128 evtype=3 ``` Be aware that `evclass` and `evtype` are hidden parameters. So, to use them, you must type them on the command line. The text file you made (`binned_events.txt`) will be used in place of the input fits filename when running gtselect. The syntax requires that you use an @ before the filename to indicate that this is a text file input rather than a fits file. We perform a selection to the data we want to analyze. For this example, we consider the source class photons within our 15 degree region of interest (ROI) centered on the blazar 3C 279. 
For some of the selections that we made with the data server and don't want to modify, we can use "INDEF" to instruct the tool to read those values from the data file header. Here, we are only filtering on event class (not on event type) and applying a zenith cut, so many of the parameters are designated as "INDEF". We apply the **gtselect** tool to the data file as follows: ``` %%bash gtselect evclass=128 evtype=3 @./data/binned_events.txt ./data/3C279_binned_filtered.fits INDEF INDEF INDEF INDEF INDEF 100 500000 90 ``` In the last step we also selected the energy range and the maximum zenith angle value (90 degrees) as suggested in Cicerone and recommended by the LAT instrument team. The Earth's limb is a strong source of background gamma rays and we can filter them out with a zenith-angle cut. The use of "zmax" in calculating the exposure allows for a more selective method than just using the ROI cuts in controlling the Earth limb contamination. The filtered data from the above steps are provided [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_filtered.fits). After the data selection is made, we need to select the good time intervals in which the satellite was working in standard data taking mode and the data quality was good. For this task we use **gtmktime** to select GTIs by filtering on information provided in the spacecraft file. The current **gtmktime** filter expression recommended by the LAT team in the Cicerone is: ``` (DATA_QUAL>0)&&(LAT_CONFIG==1) ``` This excludes time periods when some spacecraft event has affected the quality of the data; it ensures the LAT instrument was in normal science data-taking mode. Here is an example of running **gtmktime** for our analysis of the region surrounding 3C 279. 
``` %%bash gtmktime @./data/L181126210218F4F0ED2738_SC00.fits (DATA_QUAL>0)&&(LAT_CONFIG==1) no ./data/3C279_binned_filtered.fits ./data/3C279_binned_gti.fits ``` The data file with all the cuts described above is provided in this [link](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_gti.fits). A more detailed discussion of data selection can be found in the [Data Preparation](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data_preparation.html) analysis thread. To view the DSS keywords in a given extension of a data file, use the **gtvcut** tool and review the data cuts on the EVENTS extension. This provides a listing of the keywords reflecting each cut applied to the data file and their values, including the entire list of GTIs. (Use the option `suppress_gtis=no` to view the entire list.) ``` %%bash gtvcut suppress_gtis=no ./data/3C279_binned_gti.fits EVENTS ``` Here you can see the event class and event type, the location and radius of the data selection, as well as the energy range in MeV, the zenith angle cut, and the fact that the time cuts to be used in the exposure calculation are defined by the GTI table. Various Fermitools will be unable to run if you have multiple copies of a particular DSS keyword. This can happen if the position used in extracting the data from the data server is different than the position used with **gtselect**. It is wise to review the keywords for duplicates before proceeding. If you do have keyword duplication, it is advisable to regenerate the data file with consistent cuts. # 2. Make a counts map from the event data Next, we create a counts map of the ROI, summed over photon energies, in order to identify candidate sources and to ensure that the field looks sensible as a simple sanity check. 
For creating the counts map, we will use the [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) tool with the option "CMAP" (no spacecraft file is necessary for this step). Then we will view the output file, as shown below: ``` %%bash gtbin CMAP ./data/3C279_binned_gti.fits ./data/3C279_binned_cmap.fits NONE 150 150 0.2 CEL 193.98 -5.82 0.0 AIT ``` We chose an ROI of 15 degrees, corresponding to 30 degrees in diameter. Since we want a pixel size of 0.2 degrees/pixel, then we must select 30/0.2=150 pixels for the size of the x and y axes. The last command launches the visualization tool _ds9_ and produces a display of the generated [counts](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_cmap.fits) map. <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_counts_map.png'> You can see several strong sources and a number of weaker sources in this map. Mousing over the positions of these sources shows that two of them are likely 3C 279 and 3C 273. It is important to inspect your data prior to proceeding to verify that the contents are as you expect. A malformed data query or improper data selection can generate a non-circular region, or a file with zero events. By inspecting your data prior to analysis, you have an opportunity to detect such issues early in the analysis. A more detailed discussion of data exploration can be found in the [Explore LAT Data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/explore_latdata.html) analysis thread. # 3. Create a 3-D (binned) counts map Since the counts map shows the expected data, you are ready to prepare your data set for analysis. For binned likelihood analysis, the data input is a three-dimensional counts map with an energy axis, called a counts cube. The gtbin tool performs this task as well by using the `CCUBE` option. 
<img src="https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/square_in_circle.png"> The binning of the counts map determines the binning of the exposure calculation. The likelihood analysis may lose accuracy if the energy bins are not sufficiently narrow to accommodate more rapid variations in the effective area with decreasing energy below a few hundred MeV. For a typical analysis, ten logarithmically spaced bins per decade in energy are recommended. The analysis is less sensitive to the spatial binning and 0.2 deg bins are a reasonable standard. This counts cube is a square binned region that must fit within the circular acceptance cone defined during the data extraction step, and visible in the counts map above. To find the maximum size of the region your data will support, find the side of a square that can be fully inscribed within your circular acceptance region (multiply the radius of the acceptance cone by sqrt(2)). For this example, the maximum length for a side is 21.21 degrees. 
To create the counts cube, we run [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) as follows: ``` %%bash gtbin CCUBE ./data/3C279_binned_gti.fits ./data/3C279_binned_ccube.fits NONE 100 100 0.2 CEL 193.98 -5.82 0.0 AIT LOG 100 500000 37 ``` [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) takes the following as parameters: * Type of output file (CCUBE|CMAP|LC|PHA1|PHA2|HEALPIX) * Event data file name * Output file name * Spacecraft data file name * Size of the X axis in pixels * Size of the Y axis in pixels * Image scale (in degrees/pixel) * Coordindate system (CEL - celestial; GAL - galactic) (pick CEL or GAL) * First coordinate of image center in degrees (RA or galactic l) * Second coordinate of image center in degrees (DEC or galactic b) * Rotation angle of image axis, in degrees * Projection method (AIT|ARC|CAR|GLS|MER|NCP|SIN|STG|TAN) * Algorithm for defining energy bins (FILE|LIN|LOG) * Start value for first energy bin in MeV * Stop value for last energy bin in MeV * Number of logarithmically uniform energy bins The counts cube generated in this step is provided [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_ccube.fits). If you open the file with _ds9_, you see that it is made up of 37 images, one for each logarithmic energy bin. By playing through these images, it is easy to see how the PSF of the LAT changes with energy. You can also see that changing energy cuts could be helpful when trying to optimize the localization or spectral information for specific sources. Be sure to verify that there are no black corners on your counts cube. These corners correspond to regions with no data and will cause errors in your exposure calculations. # 4. 
Download the latest diffuse model files When you use the current Galactic diffuse emission model ([`gll_iem_v07.fits`](https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/gll_iem_v07.fits)) in a likelihood analysis, you also want to use the corresponding model for the extragalactic isotropic diffuse emission, which includes the residual cosmic-ray background. The recommended isotropic model for point source analysis is [`iso_P8R3_SOURCE_V2_v1.txt`](https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/iso_P8R3_SOURCE_V2_v1.txt). All the Pass 8 background models have been included in the Fermitools distribution, in the `$(FERMI_DIR)/refdata/fermi/galdiffuse/` directory. If you use that path in your model, you should not have to download the diffuse models individually. >**NOTE**: Keep in mind that the isotropic model needs to agree with both the event class and event type selections you are using in your analysis. The iso_P8R3_SOURCE_V2_v1.txt isotropic spectrum is valid only for the latest response functions and only for data sets with front + back events combined. All of the most up-to-date background models along with a description of the models are available [here](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html). # 5. Create a source model XML file The [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) tool reads the source model from an XML file. The model file contains your best guess at the locations and spectral forms for the sources in your data. A source model can be created using the [model editor](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/modeleditor.txt) tool, by using the user contributed tool `make4FGLxml.py` (available at the [user-contributed tools](https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/) page), or by editing the file directly within a text editor. 
Here we cannot use the same source model that was used to analyze six months of data in the Unbinned Likelihood tutorial, as the 2-year data set contains many more significant sources and will not converge. Instead, we will use the 4FGL catalog to define our source model by running `make4FGLxml.py`. To run the script, you will need to download the current LAT catalog file and place it in your working directory: ``` !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/python3/make4FGLxml.py !wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/10yr_catalog/gll_psc_v26.xml !mv make4FGLxml.py gll_psc_v26.xml ./data !python ./data/make4FGLxml.py ./data/gll_psc_v26.xml ./data/3C279_binned_gti.fits -o ./data/3C279_input_model.xml -r 5.0 ``` Note that we are using a high level of significance so that we only fit the brightest sources, and we have forced the extended sources to be modeled as point sources. It is also necessary to specify the entire path to location of the diffuse model on your system. Clearly, the simple 4-source model we used for the 6-month [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) analysis would have been too simplistic. This XML file uses the spectral model from the 4FGL catalog analysis for each source. (The catalog file is available at the [LAT 8-yr Catalog page](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/8yr_catalog/).) However, that analysis used a subset of the available spectral models. A dedicated analysis of the region may indicate a different spectral model is preferred. 
For more details on the options available for your XML models, see: * Descriptions of available [Spectral and Spatial Models](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/source_models.html) * Examples of [XML Model Definitions for Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html) Finally, the `make4FGLxml.py` script automatically adds 10 degrees to your ROI to account for sources that lie outside your data region, but which may contribute photons to your data. In addition, it gives you the ability to free only some of the spectral parameters for sources within your ROI, and fixes them for the others. With hundreds of sources, there are too many free parameters to gain a good spectral fit. It is advisable to revise these values so that only sources near your source of interest, or very bright source, have all spectral parameters free. Farther away, you can fix the spectral form and free only the normalization parameter (or "prefactor"). If you are working in a crowded region or have nested sources (e.g. a point source on top of an extended source), you will probably want to fix parameters for some sources even if they lie close to your source of interest. Only the normalization parameter will be left free for the remaining sources within the ROI. We have also used the significance parameter (`-s`) of `make4FLGxml.py` to free only the brightest sources in our ROI. In addition, we used the `-v` flag to override that for sources that are significantly variable. Both these changes are necessary: having too many free parameters will not allow the fit to converge (see the section for the fitting step). ### XML for Extended Sources In some regions, the [make4FGLxml.py](https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make4FGLxml.py) script may add one or more extended sources to your XML model. The script will provide the number of extended sources included in the model. 
In order to use these extended sources, you will need to download the extended source templates from the [LAT Catalog](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/8yr_catalog/) page (look for "Extended Source template archive"). Extract the archive in the directory of your choice and note the path to the template files, which have names like `W44.fits` and `VelaX.fits`. You will need to provide the path to the template file to the script before you run it. Here is an example of the proper format for an extended source XML entry for Binned Likelihood analysis:

```xml
<source name="SpatialMap_source" type="DiffuseSource">
  <spectrum type="PowerLaw2">
    <parameter free="1" max="1000.0" min="1e-05" name="Integral" scale="1e-06" value="1.0"/>
    <parameter free="1" max="-1.0" min="-5.0" name="Index" scale="1.0" value="-2.0"/>
    <parameter free="0" max="200000.0" min="20.0" name="LowerLimit" scale="1.0" value="20.0"/>
    <parameter free="0" max="200000.0" min="20.0" name="UpperLimit" scale="1.0" value="2e5"/>
  </spectrum>
  <spatialModel file="$(PATH_TO_FILE)/W44.fits" type="SpatialMap" map_based_integral="true">
    <parameter free="0" max="1000.0" min="0.001" name="Normalization" scale="1.0" value="1.0"/>
  </spatialModel>
</source>
```

# 6. Compute livetimes and exposure

To speed up the exposure calculations performed by Likelihood, it is helpful to pre-compute the livetime as a function of sky position and off-axis angle. The [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/gtltcube.txt) tool creates a livetime cube, which is a [HealPix](http://healpix.jpl.nasa.gov/) table, covering the entire sky, of the integrated livetime as a function of inclination with respect to the LAT z-axis.
Here is an example of how to run [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/gtltcube.txt):

```
%%bash
gtltcube zmax=90 ./data/3C279_binned_gti.fits ./data/L181126210218F4F0ED2738_SC00.fits ./data/3C279_binned_ltcube.fits 0.025 1
```

>**Note**: Values such as "0.1" for "Step size in cos(theta)" are known to give unexpected results. Use "0.09" instead.

The livetime cube generated from this analysis can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_ltcube.fits).

For more information about the livetime cubes see the documentation in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/) and also the explanation in the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) tutorial.

# 7. Compute exposure map

Next, you must apply the livetime calculated in the previous step to your region of interest. To do this, we use the [gtexpcube2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtexpcube2.txt) tool, which is an updated version of the previous **gtexpcube**. This tool generates a binned exposure map, an accounting of the exposure at each position in the sky, that are a required input to the likelihood process.

>**NOTE**: In the past, running **gtsrcmaps** calculated the exposure map for you, so most analyses skipped the binned exposure map generation step. With the introduction of **gtexpcube2**, this is no longer the case. You must explicitly command the creation of the exposure map as a separate analysis step.

In order to create an exposure map that accounts for contributions from all the sources in your analysis region, you must consider not just the sources included in the counts cube. The large PSF of the LAT means that at low energies, sources from well outside your counts cube could affect the sources you are analyzing.
To compensate for this, you must create an exposure map that includes sources up to 10 degrees outside your ROI. (The ROI is determined by the radius you downloaded from the data server, here a 15 degree radius.) In addition, you should account for all the exposure that contributes to those additional sources. Since the exposure map uses square pixels, to match the binning in the counts cube, and to ensure we don't have errors, we generate a 300x300 pixel map. If you provide [gtexpcube2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtexpcube2.txt) a filename for your counts cube, it will use the information from that file to define the geometry of the exposure map. This is legacy behavior and will not give you the necessary 20° buffer you need to completely account for the exposure of nearby sources. (It will also cause an error in the next step.) Instead, you should specify the appropriate geometry for the exposure map, remembering that the counts cube used 0.2 degree pixel binning. To do that, enter `none` when asked for a Counts cube. **Note**: If you get a "`File not found`" error in the examples below, just put the IRF name in explicitly. The appropriate IRF for this data set is `P8R3_SOURCE_V2`. ``` %%bash gtexpcube2 ./data/3C279_binned_ltcube.fits none ./data/3C279_binned_expcube.fits P8R3_SOURCE_V2 300 300 .2 193.98 -5.82 0 AIT CEL 100 500000 37 ``` The generated exposure map can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_expcube.fits). At this point, you may decide it is easier to simply generate exposure maps for the entire sky. You may be right, as it certainly simplifies the step when scripting. However, making an all-sky map increases the processing time for this step, though the increase is modest. 
To generate an all-sky exposure map (rather than the exposure map we calculated above) you need to specify the proper binning and explicitly give the number of pixels for the entire sky (360°x180°). Here is an example: ``` %%bash gtexpcube2 ./data/3C279_binned_ltcube.fits none ./data/3C279_binned_allsky_expcube.fits P8R3_SOURCE_V2 1800 900 .2 193.98 -5.82 0 AIT CEL 100 500000 37 ``` The all-sky exposure map can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_allsky_expcube.fits). Just as in the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) analysis, the exposure needs to be recalculated if the ROI, zenith angle, time, event class, or energy selections applied to the data are changed. For the binned analysis, this also includes the spatial and energy binning of the 3D counts map (which affects the exposure map as well). # 8. Compute source map The [gtsrcmaps](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtsrcmaps.txt) tool creates model counts maps for use with the binned likelihood analysis. To do this, it takes each source spectrum in the XML model, multiplies it by the exposure at the source position, and convolves that exposure with the effective PSF. This is an example of how to run the tool: ``` %%bash gtsrcmaps ./data/3C279_binned_ltcube.fits ./data/3C279_binned_ccube.fits ./data/3C279_input_model.xml ./data/3C279_binned_allsky_expcube.fits ./data/3C279_binned_srcmaps.fits CALDB ``` The output file from [gtsrcmaps](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtsrcmaps.txt) can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_srcmaps.fits). Because your model map can include sources outside your ROI, you may see a list of warnings at the beginning of the output. 
These are expected (because you have properly included sources outside your ROI in your XML file) and should cause no problem in your analysis. In addition, if your exposure map is too small for the region, you will see the following warning: ``` Caught St13runtime_error at the top level: Request for exposure at a sky position that is outside of the map boundaries. The contribution of the diffuse source outside of the exposure and counts map boundaries is being computed to account for PSF leakage into the analysis region. To handle this, use an all-sky binned exposure map. Alternatively, to neglect contributions outside of the counts map region, use the emapbnds=no option when running gtsrcmaps. ``` In this situation, you should increase the dimensions of your exposure map, or just move to the all-sky version. Source map generation for the point sources is fairly quick, and maps for many point sources may take up a lot of disk space. If you are analyzing a single long data set, it may be preferable to pre-compute only the source maps for the diffuse components at this stage. [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) will compute maps for the point sources on the fly if they appear in the XML definition and a corresponding map is not in the source maps FITS file. To skip generating source maps for point sources, specify "`ptsrc=no`" on the command line when running **gtsrcmaps**. However, if you expect to perform multiple fits on the same set of data, precomputing the source maps will probably save you time. # 9. Run gtlike >NOTE: Prior to running **gtlike** for Unbinned Likelihood, it is necessary to calculate the diffuse response for each event (when that response is not precomputed). However, for Binned Likelihood analysis the diffuse response is calculated over the entire bin, so this step is not necessary. 
If you want to use the **energy dispersion correction** during your analysis, you must enable this feature using the environment variable `USE_BL_EDISP`. This may be set on the command line using:

```bash
export USE_BL_EDISP=true
```

or, depending on your shell,

```
setenv USE_BL_EDISP true
```

To disable the use of energy dispersion, you must unset the variable:

```bash
unset USE_BL_EDISP
```

or

```
unsetenv USE_BL_EDISP
```

Now we are ready to run the [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) application. Here, we request that the fitted parameters be saved to an output XML model file for use in later steps.

```
!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_output_model.xml

%%bash
gtlike refit=yes plot=yes sfile=./data/3C279_binned_output.xml BINNED ./data/3C279_binned_srcmaps.fits ./data/3C279_binned_allsky_expcube.fits ./data/3C279_binned_ltcube.fits ./data/3C279_input_model.xml CALDB NEWMINUIT
```

Most of the entries prompted for are fairly obvious. In addition to the various XML and FITS files, the user is prompted for a choice of IRFs, the type of statistic to use, and the optimizer.

The statistics available are:

* **UNBINNED**: This should be used for short timescale or low source count data. If this option is chosen then parameters for the spacecraft file, event file, and exposure file must be given. See explanation in: [Likelihood Tutorial]()

* **BINNED**: This is a standard binned analysis as described in this tutorial. This analysis is used for long timescale or high-density data (such as in the Galactic plane) which can cause memory errors in the unbinned analysis.
If this option is chosen then parameters for the source map file, livetime file, and exposure file must be given. There are five optimizers from which to choose: `DRMNGB`, `DRMNFB`, `NEWMINUIT`, `MINUIT` and `LBFGS`. Generally speaking, the faster way to find the parameter estimates is to use `DRMNGB` (or `DRMNFB`) to find initial values and then use `MINUIT` (or `NEWMINUIT`) to find more accurate results. If you have trouble achieving convergence at first, you can loosen your tolerance by setting the hidden parameter `ftol` on the command line. (The default value for `ftol` is `0.001`.) Analyzing a 2-year dataset will take many hours (in our case more than 2 days with a 32-bit machine with 1 GB of RAM). The required running time is high if your source is in the Galactic plane. Here is some output from our fit, where 4FGL J1229.0+0202 and 4FGL J1256.1-0547 corresponds to 3C 273 and 3C 279, respectively: ``` This is gtlike version ... Photon fluxes are computed for the energy range 100 to 500000 MeV 4FGL J1229.0+0202: norm: 8.16706 +/- 0.0894921 alpha: 2.49616 +/- 0.015028 beta: 0.104635 +/- 0.0105201 Eb: 279.04 TS value: 32017.6 Flux: 6.69253e-07 +/- 7.20102e-09 photons/cm^2/s 4FGL J1256.1-0547: norm: 2.38177 +/- 0.0296458 alpha: 2.25706 +/- 0.0116212 beta: 0.0665607 +/- 0.00757385 Eb: 442.052 TS value: 29261.7 Flux: 5.05711e-07 +/- 6.14833e-09 photons/cm^2/s ... 
gll_iem_v07: Prefactor: 0.900951 +/- 0.0235397 Index: 0 Scale: 100 Flux: 0.000469334 +/- 1.22608e-05 photons/cm^2/s iso_P8R3_SOURCE_V2_v1: Normalization: 1.13545 +/- 0.0422581 Flux: 0.000139506 +/- 5.19439e-06 photons/cm^2/s WARNING: Fit may be bad in range [100, 199.488] (MeV) WARNING: Fit may be bad in range [251.124, 316.126] (MeV) WARNING: Fit may be bad in range [6302.3, 7933.61] (MeV) WARNING: Fit may be bad in range [39744.4, 50032.1] (MeV) WARNING: Fit may be bad in range [315519, 397190] (MeV) Total number of observed counts: 207751 Total number of model events: 207407 -log(Likelihood): 73014.38504 Writing fitted model to 3C279_binned_output.xml ``` Since we selected `plot=yes` in the command line, a plot of the fitted data appears. <img src="https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_spectral_fit.png"> In the first plot, the counts/MeV vs MeV are plotted. The points are the data, and the lines are the models. Error bars on the points represent sqrt(Nobs) in that band, where Nobs is the observed number of counts. The black line is the sum of the models for all sources. The colored lines follow the sources as follows: * Black - summed model * Red - first source (see below) * Green - second source * Blue - third source * Magenta - fourth source * Cyan - the fifth source If you have more sources, the colors are reused in the same order. In our case we have, in order of decreasing value on the y-axis: summed model (black), the extragalactic background (black), the galactic background (cyan), 3C 273 (red), and 3C 279 (black). The second plot gives the residuals between your model and the data. Error bars here represent (sqrt(Nopbs))/Npred, where Npred is the predicted number of counts in each band based on the fitted model. To assess the quality of the fit, look first for the words at the top of the output `<Optimizer> did successfully converge.` Successful convergence is a minimum requirement for a good fit. 
Next, look at the energy ranges that are generating warnings of bad fits. If any of these ranges affect your source of interest, you may need to revise the source model and refit. You can also look at the residuals on the plot (bottom panel). If the residuals indicate a poor fit overall (e.g., the points trending all low or all high) you should consider changing your model file, perhaps by using a different source model definition, and refit the data. If the fits and spectral shapes are good, but could be improved, you may wish to simply update your model file to hold some of the spectral parameters fixed. For example, by fixing the spectral model for 3C 273, you may get a better quality fit for 3C 279. Close the plot and you will be asked if you wish to refit the data. ``` Refit? [y] n Elapsed CPU time: 1571.805872 ``` Here, hitting `return` will instruct the application to fit again. We are happy with the result, so we type `n` and end the fit. ### Results When it completes, **gtlike** generates a standard output XML file. If you re-run the tool in the same directory, these files will be overwritten by default. Use the `clobber=no` option on the command line to keep from overwriting the output files. Unfortunately, the fit details and the value for the `-log(likelihood)` are not recorded in the automatic output files. You should consider logging the output to a text file for your records by using `> fit_data.txt` (or something similar) with your **gtlike** command. Be aware, however, that this will make it impossible to request a refit when the likelihood process completes. ``` !gtlike plot=no sfile=./data/3C279_output_model.xml > fit_data.txt ``` In this example, we used the `sfile` parameter to request that the model results be written to an output XML file. This file contains the source model results that were written to `results.dat` at the completion of the fit. 
> **Note**: If you have specified an output XML model file and you wish to modify your model while waiting at the `Refit? [y]` prompt, you will need to copy the results of the output model file to your input model before making those modifications.

The results of the likelihood analysis have to be scaled by the quantity called "scale" in the XML model in order to obtain the total photon flux (photons cm$^{-2}$ s$^{-1}$) of the source. You must refer to the model formula of your source for the interpretation of each parameter. In our example the 'prefactor' of our power law model of the first fitted source (4FGL J1159.5-0723) has to be scaled by the factor 'scale' $=10^{-14}$.

For example the total flux of 4FGL J1159.5-0723 is the integral between 100 MeV and 500000 MeV of:

$Prefactor \cdot scale \cdot (E/100)^{index} = (6.7017\times10^{-14}) \cdot (E/100)^{-2.0196}$

Errors reported with each value in the `results.dat` file are 1σ estimates (based on inverse-Hessian at the optimum of the log-likelihood surface).

### Other Useful Hidden Parameters

If you are scripting and wish to generate multiple output files without overwriting, the `results` and `specfile` parameters allow you to specify output filenames for the `results.dat` and `counts_spectra.fits` files respectively.

If you do not specify a source model output file with the `sfile` parameter, then the input model file will be overwritten with the latest fit. This is convenient as it allows the user to edit that file while the application is waiting at the `Refit? [y]` prompt so that parameters can be adjusted and set free or fixed. This would be similar to the use of the "newpar", "freeze", and "thaw" commands in [XSPEC](http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/index.html).

# 10. Create a model map

For comparison to the counts map data, we create a model map of the region based on the fit parameters. This map is essentially an infinite-statistics counts map of the region-of-interest based on our model fit.
The [gtmodel](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmodel.txt) application reads in the fitted model, applies the proper scaling to the source maps, and adds them together to get the final map.

```
%%bash
gtmodel ./data/3C279_binned_srcmaps.fits ./data/3C279_binned_output.xml ./data/3C279_model_map.fits CALDB ./data/3C279_binned_ltcube.fits ./data/3C279_binned_allsky_expcube.fits
```

To understand how well the fit matches the data, we want to compare the [model map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_model_map.fits) just created with the counts map over the same field of view. First we have to create the [new counts map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_cmap_small.fits) that matches in size the model map (the one generated earlier encircles the ROI, while the model map is completely inscribed within the ROI): We will use again the [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) tool with the option `CMAP` as shown below:

```
%%bash
gtbin CMAP ./data/3C279_binned_gti.fits ./data/3C279_binned_cmap_small.fits NONE 100 100 0.2 CEL 193.98 -5.82 0.0 STG
```

Here we've plotted the model map next to the energy-summed counts map for the data.

<img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_map_comparison.png'>

Finally we want to create the [residual map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_residual.fits) by using the FTOOL **farith** to check if we can improve the model:

```
%%bash
farith ./data/3C279_binned_cmap_small.fits ./data/3C279_model_map.fits ./data/3C279_residual.fits SUB
```

The residual map is shown below. As you can see, the binning we chose probably used pixels that were too large. The primary sources, 3C 273 and 3C 279, have some positive pixels next to some negative ones.
This effect could be lessened by either using a smaller pixel size or by offsetting the central position slightly from the position of the blazar (or both). If your residual map contains bright sources, the next step would be to iterate the analysis with the additional sources included in the XML model file. <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_residuals.png'>
github_jupyter
``` import os # for working directory import numpy as np # for math import scipy as stats # statistical methods import pandas as pd # for data frames import matplotlib.pyplot as plt # for plotting import seaborn as sns from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn import svm import glob os.chdir("C:/Users/Myoung-Goo/Documents/Energy/Python_Workflows") # set working directory WP = pd.read_csv("well production.csv") # reading file # sum oil production WP['Cumulative Oil Production (Year 1)'] = WP.iloc[:,4:16].sum(axis=1) # sum water production WP['Cumulative Water Production (Year 1)'] = WP.iloc[:,16:28].sum(axis=1) # delete unnecessary columns WP = WP.drop(columns=['oil 1', 'oil 2', 'oil 3', 'oil 4', 'oil 5', 'oil 6', 'oil 7', 'oil 8', 'oil 9', 'oil 10', 'oil 11', 'oil 12']) WP = WP.drop(columns=['water 1', 'water 2', 'water 3', 'water 4', 'water 5', 'water 6', 'water 7', 'water 8', 'water 9', 'water 10', 'water 11', 'water 12']) li = [] FN = [] TP = [] MPR = [] MPO = [] MPE = [] MP = [] MPOi = [] MT = [] ME = [] N = [] WS =[] a = [] # sort files by name WP = WP.sort_values('well name') # glob all the files csv_files = glob.glob('Well Data/*.csv') # prep values for every file for files in csv_files: file = pd.read_csv(files) li.append(file.easting.max()-file.easting.min()) FN.append(file['proppant weight (lbs)'].count()) TP.append(file['proppant weight (lbs)'].sum()) MP.append(file['proppant weight (lbs)'].mean()) MPR.append(file['pump rate (cubic feet/min)'].mean()) MPO.append(file['porosity'].mean()) MPE.append(file['permeability'].mean()) MPOi.append(file["Poisson's ratio"].mean()) MT.append(file["thickness (ft)"].mean()) N.append(file["northing"].mean()) ME.append((file["easting"].max()+file['easting'].min())/2) WS.append(file['water saturation'].mean()) a.append(file) # add new columns WP['Well Length'] = li WP['Number of frac stages'] = FN WP['Total Proppant weight (lbs)'] = TP WP['Average Proppant weight 
(lbs) per stage'] = MP WP['Average pump rate (cubic feet/min)'] = MPR WP['Average Porosity'] = MPO WP['Average Permeability'] = MPE WP["Average Poisson's ratio"] = MPOi WP['Average Thickness (ft)'] = MT WP['Northing'] = N WP['Middle Easting value'] = ME WP['Average Water Saturation'] = WS WP = WP.set_index('well name') WP.head() #WP.to_csv('cleaned well production.csv') AM = 360 AA = AM*640 # formulas WP['Original Oil in Place'] = (7758*AA*WP['Average Thickness (ft)']*WP['Average Porosity']*(1-WP['Average Water Saturation']))/WP['formation volume factor'] WP['Recoverable Reserves'] = WP['Original Oil in Place']*WP['recovery factor'] ```
github_jupyter
``` import torch, math import torchvision import torchvision.transforms as transforms import matplotlib.pyplot as plt import numpy as np from PIL import Image import time import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader from torch.utils.data import Dataset import torch.optim as optim !pip install torchsummary from torchsummary import summary !pip install einops from math import ceil !pip install performer-pytorch import os, glob import pandas as pd from torchvision.io import read_image from torch import nn, einsum from einops import rearrange, repeat from einops.layers.torch import Rearrange from einops import rearrange, reduce # helpers from einops import reduce batch_size = 160 !wget http://cs231n.stanford.edu/tiny-imagenet-200.zip !unzip -q ./tiny-imagenet-200.zip device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") transform = transforms.Normalize((122.4786, 114.2755, 101.3963), (70.4924, 68.5679, 71.8127)) id_dict = {} for i, line in enumerate(open('./tiny-imagenet-200/wnids.txt', 'r')): id_dict[line.replace('\n', '')] = i class TrainTinyImageNetDataset(Dataset): def __init__(self, id, transform=None): self.filenames = glob.glob("./tiny-imagenet-200/train/*/*/*.JPEG") self.transform = transform self.id_dict = id def __len__(self): return len(self.filenames) def __getitem__(self, idx): img_path = self.filenames[idx] image = read_image(img_path) if image.shape[0] == 1: image = torch.cat((image,image,image),0) label = self.id_dict[img_path.split('/')[3]] if self.transform: image = self.transform(image.type(torch.FloatTensor)) return image, label class TestTinyImageNetDataset(Dataset): def __init__(self, id, transform=None): self.filenames = glob.glob("./tiny-imagenet-200/val/images/*.JPEG") self.transform = transform self.id_dict = id self.cls_dic = {} for i, line in enumerate(open('./tiny-imagenet-200/val/val_annotations.txt', 'r')): a = line.split('\t') img, cls_id = a[0],a[1] self.cls_dic[img] = 
self.id_dict[cls_id] def __len__(self): return len(self.filenames) def __getitem__(self, idx): img_path = self.filenames[idx] image = read_image(img_path) if image.shape[0] == 1: image = torch.cat((image,image,image),0) label = self.cls_dic[img_path.split('/')[-1]] if self.transform: image = self.transform(image.type(torch.FloatTensor)) return image, label trainset = TrainTinyImageNetDataset(id=id_dict, transform = transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2) testset = TestTinyImageNetDataset(id=id_dict, transform = transform) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2) def accuracy(output, target, topk=(1,5)): """Computes the precision@k for the specified values of k prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) """ maxk = max(topk) # sizefunction: the number of total elements batch_size = target.size(0) # topk function selects the number of k before output _, pred = output.topk(maxk, 1, True, True) ##########Do not understand t()k pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res from torch.cuda.amp import autocast from functools import partial from contextlib import contextmanager from local_attention import LocalAttention from axial_positional_embedding import AxialPositionalEmbedding from performer_pytorch.reversible import ReversibleSequence, SequentialSequence def exists(val): return val is not None def empty(tensor): return tensor.numel() == 0 def default(val, d): return val if exists(val) else d @contextmanager def null_context(): yield # def cast_tuple(val): # return (val,) if not isinstance(val, tuple) else val def get_module_device(module): return next(module.parameters()).device def find_modules(nn_module, type): return [module for module in 
nn_module.modules() if isinstance(module, type)] class Always(nn.Module): def __init__(self, val): super().__init__() self.val = val def forward(self, *args, **kwargs): return self.val # kernel functions # transcribed from jax to pytorch from # https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None): b, h, *_ = data.shape data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1. ratio = (projection_matrix.shape[0] ** -0.5) projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h) projection = projection.type_as(data) data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection) diag_data = data ** 2 diag_data = torch.sum(diag_data, dim=-1) diag_data = (diag_data / 2.0) * (data_normalizer ** 2) diag_data = diag_data.unsqueeze(dim=-1) if is_query: data_dash = ratio * ( torch.exp(data_dash - diag_data - torch.max(data_dash, dim=-1, keepdim=True).values) + eps) else: data_dash = ratio * ( torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps) return data_dash.type_as(data) def generalized_kernel(data, *, projection_matrix, kernel_fn = nn.ReLU(), kernel_epsilon = 0.001, normalize_data = True, device = None): b, h, *_ = data.shape data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1. 
if projection_matrix is None: return kernel_fn(data_normalizer * data) + kernel_epsilon projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h) projection = projection.type_as(data) data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection) data_prime = kernel_fn(data_dash) + kernel_epsilon return data_prime.type_as(data) def orthogonal_matrix_chunk(cols, device = None): unstructured_block = torch.randn((cols, cols), device = device) q, r = torch.qr(unstructured_block.cpu(), some = True) q, r = map(lambda t: t.to(device), (q, r)) return q.t() def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, device = None): nb_full_blocks = int(nb_rows / nb_columns) block_list = [] for _ in range(nb_full_blocks): q = orthogonal_matrix_chunk(nb_columns, device = device) block_list.append(q) remaining_rows = nb_rows - nb_full_blocks * nb_columns if remaining_rows > 0: q = orthogonal_matrix_chunk(nb_columns, device = device) block_list.append(q[:remaining_rows]) final_matrix = torch.cat(block_list) if scaling == 0: multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1) elif scaling == 1: multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device) else: raise ValueError(f'Invalid scaling {scaling}') return torch.diag(multiplier) @ final_matrix # linear attention classes with softmax kernel # non-causal linear attention def linear_attention(q, k, v): k_cumsum = k.sum(dim = -2) D_inv = 1. 
/ torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) context = torch.einsum('...nd,...ne->...de', k, v) out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv) # print("linear attention", out.size) return out class FastAttention(nn.Module): def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), no_projection = False): super().__init__() nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))) self.dim_heads = dim_heads self.nb_features = nb_features self.ortho_scaling = ortho_scaling self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling) projection_matrix = self.create_projection() self.register_buffer('projection_matrix', projection_matrix) self.generalized_attention = generalized_attention self.kernel_fn = kernel_fn # if this is turned on, no projection will be used # queries and keys will be softmax-ed as in the original efficient attention paper self.no_projection = no_projection self.causal = causal @torch.no_grad() def redraw_projection_matrix(self, device): projections = self.create_projection(device = device) self.projection_matrix.copy_(projections) del projections def forward(self, q, k, v): device = q.device if self.no_projection: q = q.softmax(dim = -1) k = torch.exp(k) if self.causal else k.softmax(dim = -2) elif self.generalized_attention: create_kernel = partial(generalized_kernel, kernel_fn = self.kernel_fn, projection_matrix = self.projection_matrix, device = device) q, k = map(create_kernel, (q, k)) else: create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device) q = create_kernel(q, is_query = True) k = create_kernel(k, is_query = False) attn_fn = linear_attention if not self.causal else self.causal_linear_fn out = attn_fn(q, k, v) # print('fastattention', out.size()) return out # a module for keeping 
# ... track of when to update the (random feature) projections
class ProjectionUpdater(nn.Module):
    """Wraps a model and periodically redraws every FastAttention projection."""
    def __init__(self, instance, feature_redraw_interval):
        super().__init__()
        self.instance = instance
        self.feature_redraw_interval = feature_redraw_interval
        self.register_buffer('calls_since_last_redraw', torch.tensor(0))

    def fix_projections_(self):
        # disable any further redraws
        self.feature_redraw_interval = None

    def redraw_projections(self):
        model = self.instance

        if not self.training:
            return

        if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
            device = get_module_device(model)

            fast_attentions = find_modules(model, FastAttention)
            for fast_attention in fast_attentions:
                fast_attention.redraw_projection_matrix(device)

            self.calls_since_last_redraw.zero_()
            return

        self.calls_since_last_redraw += 1

    def forward(self, x):
        # FIX: `raise NotImplemented` raised a TypeError (NotImplemented is a
        # sentinel value, not an exception); NotImplementedError is correct.
        raise NotImplementedError

# classes

class Attention(nn.Module):
    """Performer attention block operating on images: patchify -> attend -> un-patchify."""
    def __init__(
        self,
        dim,
        causal = False,
        heads = 4,
        dim_head = 32,
        local_heads = 0,
        local_window_size = 256,
        nb_features = None,
        feature_redraw_interval = 1000,
        generalized_attention = False,
        kernel_fn = nn.ReLU(),
        dropout = 0.,
        no_projection = False,
        qkv_bias = False,
        attn_out_bias = True
    ):
        super().__init__()
        assert dim % heads == 0, 'dimension must be divisible by number of heads'
        dim_head = default(dim_head, dim // heads)
        inner_dim = dim_head * heads
        self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, no_projection = no_projection)

        self.heads = heads
        self.global_heads = heads - local_heads
        self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None

        self.to_q = nn.Linear(dim, inner_dim, bias = qkv_bias)
        self.to_k = nn.Linear(dim, inner_dim, bias = qkv_bias)
        self.to_v = nn.Linear(dim, inner_dim, bias = qkv_bias)
        self.to_out = nn.Linear(inner_dim, dim, bias = attn_out_bias)
        self.dropout = nn.Dropout(dropout)
        # assumes a fixed 32x32 token grid (1024 tokens) — TODO confirm input resolution
        self.convert = nn.Sequential(
            Rearrange('b (h w) (p1 p2 c) -> b c (h p1) (w p2)', h = 32, w = 32, p1 = 1, p2 = 1)
        )
        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = 1, p2 = 1)
        )

    def forward(self, x, pos_emb = None, context = None, mask = None, context_mask = None, **kwargs):
        x = self.to_patch_embedding(x)
        b, n, _, h, gh = *x.shape, self.heads, self.global_heads

        cross_attend = exists(context)
        context = default(context, x)
        context_mask = default(context_mask, mask) if not cross_attend else context_mask

        q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
        # split heads into global (fast attention) and local attention groups
        (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))

        attn_outs = []

        if not empty(q):
            if exists(context_mask):
                global_mask = context_mask[:, None, :, None]
                v.masked_fill_(~global_mask, 0.)

            if exists(pos_emb) and not cross_attend:
                q, k = apply_rotary_pos_emb(q, k, pos_emb)

            out = self.fast_attention(q, k, v)
            attn_outs.append(out)

        if not empty(lq):
            assert not cross_attend, 'local attention is not compatible with cross attention'
            out = self.local_attn(lq, lk, lv, input_mask = mask)
            attn_outs.append(out)

        out = torch.cat(attn_outs, dim = 1)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        out = self.dropout(out)
        # back to image layout (b, c, h, w)
        return self.convert(out)

class SelfAttention(Attention):
    def forward(self, *args, context = None, **kwargs):
        assert not exists(context), 'self attention should not receive context'
        return super().forward(*args, **kwargs)

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, repeat
from einops.layers.torch import Rearrange

# helper methods

def group_dict_by_key(cond, d):
    """Split dict d into (matching, non-matching) dicts by predicate cond over keys."""
    return_val = [dict(), dict()]
    for key in d.keys():
        match = bool(cond(key))
        ind = int(not match)
        return_val[ind][key] = d[key]
    return (*return_val,)

def group_by_key_prefix_and_remove_prefix(prefix, d):
    """Pop keys starting with `prefix` out of d; return them with the prefix stripped."""
    kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: x.startswith(prefix), d)
    kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
    return kwargs_without_prefix, kwargs

# classes

class LayerNorm(nn.Module): # layernorm, but done in the channel dimension #1
    def __init__(self, dim, eps = 1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
        mean = torch.mean(x, dim = 1, keepdim = True)
        return (x - mean) / (std + self.eps) * self.g + self.b

class PreNorm(nn.Module):
    """Apply channel LayerNorm before the wrapped function (pre-norm residual style)."""
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = LayerNorm(dim)
        self.fn = fn
    def forward(self, x, **kwargs):
        x = self.norm(x)
        return self.fn(x, **kwargs)

class FeedForward(nn.Module):
    """1x1-conv MLP with GELU, expansion factor `mult`."""
    def __init__(self, dim, mult = 4, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Conv2d(dim * mult, dim, 1),
            nn.Dropout(dropout)
        )
    def forward(self, x):
        return self.net(x)

class DepthWiseConv2d(nn.Module):
    """Depthwise conv + BN + pointwise conv (separable convolution)."""
    def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias = True):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias),
            nn.BatchNorm2d(dim_in),
            nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias)
        )
    def forward(self, x):
        return self.net(x)

class Transformer(nn.Module):
    def __init__(self, dim, proj_kernel, kv_proj_stride, depth, heads, dim_head = 64, mlp_mult = 4, dropout = 0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        # NOTE(review): proj_kernel, kv_proj_stride and the dim_head argument
        # are accepted but ignored — the values below are hard-coded. Confirm
        # this is intentional before relying on those parameters.
        dim_head = 32
        local_attn_heads = 0
        local_window_size = 256
        causal = False
        nb_features = None
        generalized_attention = False
        kernel_fn = nn.ReLU()
attn_dropout = 0. no_projection = False qkv_bias = True attn_out_bias = True for _ in range(depth): self.layers.append(nn.ModuleList([ PreNorm(dim, SelfAttention(dim, causal = causal, heads = heads, dim_head = dim_head, local_heads = local_attn_heads, local_window_size = local_window_size, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection, qkv_bias = qkv_bias, attn_out_bias = attn_out_bias)), PreNorm(dim, FeedForward(dim, mlp_mult, dropout = dropout)) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class CvT(nn.Module): def __init__( self, *, num_classes, s1_emb_dim = 64, s1_emb_kernel = 7, s1_emb_stride = 4, s1_proj_kernel = 3, s1_kv_proj_stride = 2, s1_heads = 1, s1_depth = 1, s1_mlp_mult = 4, s2_emb_dim = 192, s2_emb_kernel = 3, s2_emb_stride = 2, s2_proj_kernel = 3, s2_kv_proj_stride = 2, s2_heads = 3, s2_depth = 2, s2_mlp_mult = 4, s3_emb_dim = 384, s3_emb_kernel = 3, s3_emb_stride = 2, s3_proj_kernel = 3, s3_kv_proj_stride = 2, s3_heads = 6, s3_depth = 10, s3_mlp_mult = 4, dropout = 0. ): super().__init__() kwargs = dict(locals()) dim = 3 layers = [] for prefix in ('s1', 's2', 's3'): config, kwargs = group_by_key_prefix_and_remove_prefix(f'{prefix}_', kwargs) layers.append(nn.Sequential( nn.Conv2d(dim, config['emb_dim'], kernel_size = config['emb_kernel'], padding = (config['emb_kernel'] // 2), stride = config['emb_stride']), LayerNorm(config['emb_dim']), Transformer(dim = config['emb_dim'], proj_kernel = config['proj_kernel'], kv_proj_stride = config['kv_proj_stride'], depth = config['depth'], heads = config['heads'], mlp_mult = config['mlp_mult'], dropout = dropout) )) dim = config['emb_dim'] self.layers = nn.Sequential( *layers, nn.AdaptiveAvgPool2d(1), Rearrange('... 
() () -> ...'), nn.Linear(dim, num_classes) ) def forward(self, x): return self.layers(x) model = CvT( num_classes = 200, s1_emb_dim = 128, # stage 1 - dimension s1_emb_kernel = 3, # stage 1 - conv kernel s1_emb_stride = 2, # stage 1 - conv stride s1_proj_kernel = 3, # stage 1 - attention ds-conv kernel size s1_kv_proj_stride = 1, # stage 1 - attention key / value projection stride s1_heads = 4, # stage 1 - heads s1_depth = 1, # stage 1 - depth s1_mlp_mult = 2, # stage 1 - feedforward expansion factor s2_emb_dim = 128, # stage 2 - (same as above) s2_emb_kernel = 3, s2_emb_stride = 1, s2_proj_kernel = 3, s2_kv_proj_stride = 1, s2_heads = 4, s2_depth = 1, s2_mlp_mult = 2, s3_emb_dim = 128, # stage 3 - (same as above) s3_emb_kernel = 3, s3_emb_stride = 1, s3_proj_kernel = 3, s3_kv_proj_stride = 1, s3_heads = 4, s3_depth = 2, s3_mlp_mult = 2, dropout = 0. ) model.to(device) print(summary(model, (3,64,64))) print(torch.cuda.get_device_properties(device)) # model.load_state_dict(torch.load('../input/levin1/LeViN (1).pth')) criterion = nn.CrossEntropyLoss() scaler = torch.cuda.amp.GradScaler() # optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) top1 = [] top5 = [] optimizer = optim.AdamW(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False) for epoch in range(150): # loop over the dataset multiple times t0 = time.time() epoch_accuracy = 0 epoch_loss = 0 running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data[0].to(device), data[1].to(device) optimizer.zero_grad() outputs = model(inputs) with torch.cuda.amp.autocast(): loss = criterion(outputs, labels) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() acc = (outputs.argmax(dim=1) == labels).float().mean() epoch_accuracy += acc / len(trainloader) epoch_loss += loss / len(trainloader) # print statistics running_loss += loss.item() if i % 200 == 199: # print every 2000 
mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 correct = 0 total = 0 correct_1=0 correct_5=0 c = 0 with torch.no_grad(): for data in testloader: images, labels = data[0].to(device), data[1].to(device) outputs = model(images) # outputs = net(images) _, predicted = torch.max(outputs.data, 1) res = accuracy(outputs, labels) correct_1 += res[0][0].float() correct_5 += res[1][0].float() total += labels.size(0) correct += (predicted == labels).sum().item() c += 1 print(f"Epoch : {epoch+1} - loss : {epoch_loss:.4f} - acc: {epoch_accuracy:.4f} - Top 1: {correct_1/c} - Top 5: {correct_5/c} - Time: {time.time() - t0}\n") top1.append(correct_1/c) top5.append(correct_5/c) if float(correct_1/c) >= float(max(top1)): PATH = 'CvP.pth' torch.save(model.state_dict(), PATH) print(1) print('Finished Training') ```
github_jupyter
# Sample for Stock Analyse ## Import the necessary lib ``` import datetime import pandas as pd import numpy as np from keras import models,layers,optimizers from keras.utils import to_categorical import matplotlib.pyplot as plt ``` ## Import the data and format it to target ``` testfile="E://notebook/shsz_stockdata/sh600002.csv" originaldata=pd.read_csv(testfile,encoding="gb2312") originaldata.head() originaldata.columns originaldata.columns.shape newindex=['Code', 'Name', 'Date', 'Industry', 'Concept', 'Region', 'Open', 'High', 'Low', 'Close', 'AfterPrice', 'PrePrice', 'Limit', 'Volume', 'Amount', 'HandoverRate', 'CurrentMarket', 'TotalMarket', 'TopLimit', 'BottomLimit', 'PETTM', 'PriceSaleTTM', 'CurrentTTM', 'PriceToBook', 'MA_5', 'MA_10', 'MA_20', 'MA_30', 'MA_60', 'MA_GCD', 'MACD_DIF', 'MACD_DEA', 'MACD_MACD', 'MACD_GCD', 'KDJ_K', 'KDJ_D', 'KDJ_J', 'KDJ_GCD', 'BrinlineMiddle', 'BrinlineUp', 'BrinlineDown', 'psy', 'psyma', 'rsi1', 'rsi2', 'rsi3', 'Amplitude', 'QuantityRelativeRatio'] originaldata.columns=newindex originaldata.head() originaldata.dtypes del originaldata["Code"] del originaldata["Name"] del originaldata["Industry"] del originaldata["KDJ_GCD"] del originaldata["Concept"] del originaldata["Region"] del originaldata["PrePrice"] del originaldata["PETTM"] del originaldata["PriceSaleTTM"] del originaldata["CurrentTTM"] del originaldata["MA_5"] del originaldata["MA_10"] del originaldata["MA_20"] del originaldata["MA_30"] del originaldata["MA_60"] del originaldata["MA_GCD"] del originaldata["MACD_DIF"] del originaldata["MACD_DEA"] del originaldata["MACD_MACD"] del originaldata["MACD_GCD"] del originaldata["BrinlineMiddle"] del originaldata["BrinlineUp"] del originaldata["BrinlineDown"] del originaldata["rsi1"] del originaldata["rsi2"] del originaldata["rsi3"] originaldata.isna().sum(axis=0) filleddata=originaldata.fillna(method="pad") filleddata.dtypes filleddata["Date"]=pd.to_datetime(filleddata["Date"]) filleddata.dtypes filleddata.head() 
filleddata=filleddata.set_index("Date",drop=True) filleddata.head() filleddata=filleddata.sort_index(ascending=True) def setlabel(x): if x<=0: return 0 if x>0 and x<=0.01: return 1 else: return 2 filleddata["Label"]=filleddata["Limit"].apply(setlabel) savelabel=filleddata["Label"] #Data normalization normalizeddata=(filleddata-filleddata.mean(axis=0))/filleddata.std(axis=0) normalizeddata["Label"]=savelabel #transform to array data=np.array(normalizeddata) ``` # Define the generate function for dataset ``` data[0] def datagenerator(data, lookback, delay, min_index, max_index,shuffle=False, batch_size=32, step=1): if max_index is None: max_index = len(data) - delay - 1 i = min_index + lookback while 1: if shuffle: rows = np.random.randint(min_index + lookback, max_index, size=batch_size) else: if i + batch_size >= max_index: i = min_index + lookback rows = np.arange(i, min(i + batch_size, max_index)) i += len(rows) samples = np.zeros((len(rows),lookback // step,data.shape[-1])) targets = np.zeros((len(rows),)) for j, row in enumerate(rows): indices = range(rows[j] - lookback, rows[j], step) samples[j] = data[indices] targets[j] = data[rows[j] + delay][-1] targets=to_categorical(targets) yield samples, targets lookback = 10 step = 1 delay = 11 batch_size = 32 trainnum=int(len(data)*0.6) valnum=int(len(data)*0.2) testnum=int(len(data)*0.2) train_gen = datagenerator(data, lookback=lookback, delay=delay, min_index=0, max_index=trainnum, shuffle=True, step=step, batch_size=batch_size) val_gen = datagenerator(data, lookback=lookback, delay=delay, min_index=trainnum, max_index=trainnum+valnum, step=step, batch_size=batch_size) test_gen = datagenerator(data, lookback=lookback, delay=delay, min_index=trainnum+valnum, max_index=None, step=step, batch_size=batch_size) val_steps = (valnum - lookback) // batch_size test_steps = (testnum - lookback) // batch_size ``` # Keras Models ## LSTM ``` basemodel = models.Sequential() basemodel.add(layers.Dense(512, 
activation='relu',input_shape=(None, data.shape[-1]))) basemodel.add(layers.Dropout(0.5)) basemodel.add(layers.LSTM(32,dropout=0.5)) basemodel.add(layers.Dense(512,activation="relu")) basemodel.add(layers.Dropout(0.5)) basemodel.add(layers.Dense(3,activation="softmax")) basemodel.summary() basemodel.compile(optimizer=optimizers.RMSprop(lr=0.001),loss="categorical_crossentropy",metrics=["acc"]) history=basemodel.fit_generator(train_gen,steps_per_epoch=32,epochs=20,validation_data=val_gen,validation_steps=val_steps) import matplotlib.pyplot as plt loss = history.history['loss'] val_loss = history.history['val_loss'] acc = history.history['acc'] val_acc = history.history['val_acc'] epochs = range(len(loss)) plt.figure(figsize=(15,8)) plt.plot(epochs, loss, 'b', c="blue",label='Training loss') plt.plot(epochs, val_loss, 'b', c="green",label='Validation loss') plt.plot(epochs, acc, 'b', c="black",label='Training acc') plt.plot(epochs, val_acc, 'b', c="red",label='Validation acc') plt.title('Training and Validation Curve') plt.legend() plt.show() ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import functools ``` # Overview ``` # This notebook aims to help the reader understand the NSGA-2 algorithm decribed in the paper: # A Fast and Elitist Multiobjective Genetic Algorithm: NSGA-II # Kalyanmoy Deb, Associate Member, IEEE, Amrit Pratap, Sameer Agarwal, and T. Meyarivan # This tutorial assumes familiarity with genetic algorithms, but no prior knowladge on multi objective optimization # NOTE, in all the examples optimize means maximize, this is a convention. # Content: # - Concepts: Domination, Multi objective optimization, Pareto front # - A real world example: Starcraft build orders # - A simple example # - Brute force solution # - Non dominated sorting: naive approach (worst case complexity O(M N^3)) # - Non dominated sorting: fast approach (worst case complexity O(M N^2) ) # - Diversity Preservation # - Putting it all togeather # - Solving the toy problem # - Solving a more complex problem # - Making it faster ``` ### Concepts ``` # With Multiobjective optimization, we simultaneously optimize for multiple objectives. # The goal of the multiobjective optimization is to find a set of solutions which is not dominated by any other solution. # But what is domination? # A solution dominates another solution if in all of the objectives it is better or equal, and in at least one it is better. def dominates(fitnesses_1,fitnesses_2): # fitnesses_1 is a array of objectives of solution 1 [objective1, objective2 ...] larger_or_equal = fitnesses_1 >= fitnesses_2 larger = fitnesses_1 > fitnesses_2 if np.all(larger_or_equal) and np.any(larger): return True return False # A set of solutions which do not dominate each other is called a Pareto front. # When there is no solution which dominates any of members of the front, it is called an optimal Pareto front. # The goeal of multiobjective optimization is to find the optimal Pareto front. 
``` ### A real world example: Starcraft build orders ``` # People were using genetic algorithms to find optimal build orders for stracraft. # How would we use multiobjective optimization for this? # One objective can be the size of our economy after 5 minutes # Another objective can be the size of our army after 5 minutes # The goal is to find a set of build orders (with various tradoff between the objectives), which are not dominated by any other build orders. ``` ### A simple example ``` # optimize the scalar x # for objectives obj1 and obj2: def obj1(x): return 1 - x * x def obj2(x): return 1 - (x-0.5) * (x-0.5) # Let us plot the objetives for all possible x x = np.linspace(-1,1,100) plt.plot(x,obj1(x)) plt.plot(x,obj2(x)) plt.xlabel("X") plt.ylabel("Objectives") plt.legend(["obj1","obj2"]) plt.show() # and plot the objectives against each other plt.plot(obj2(x),obj1(x)) plt.xlabel("obj2") plt.ylabel("obj1") ``` ### A brute force solution ``` # Let solve this toy example by comparing evey possible solution to each other, and get the set which no other solution dominates all_possible_solution = np.linspace(-1,1,100) all_fitnesses = np.stack( [obj1(all_possible_solution),obj2(all_possible_solution)],axis=1) nondominated_indicies = [] for i in range(len(all_possible_solution)): dominated = False for j in range(len(all_possible_solution)): if dominates(all_fitnesses[j],all_fitnesses[i]): dominated = True break if dominated is False: nondominated_indicies.append(i) nondominated_solutions = all_possible_solution[nondominated_indicies] # Plot the optimal values togeather with all values plt.plot(x,obj1(x)) plt.plot(x,obj2(x)) plt.plot(x[nondominated_indicies],obj1(x)[nondominated_indicies],color="red") plt.plot(x[nondominated_indicies],obj2(x)[nondominated_indicies],color="red") plt.xlabel("X") plt.ylabel("Objectives") plt.legend(["obj1","obj2","optimal obj"]) plt.show() plt.plot(obj2(x),obj1(x)) 
plt.plot(obj2(x)[nondominated_indicies],obj1(x)[nondominated_indicies],color="red") plt.xlabel("obj2") plt.ylabel("obj1") plt.legend(["all possible solutions","optimal pareto front"]) ``` ### Non dominated sorting: naive approach (worst case complexity O(M N^3)) ``` # To use multiobjectve optimization with genetic algorithms, we need to rank individuals # To do this, we calculate which front they belong to. # Individuals which are not dominated belong to the fisrt front. # Then we temporarily remove the first front, and calculate the set of individuals that are not dominated in the reduced set. # This becomes the second front and so on. # For this example, we start with a list of fitness values for each objective. (randomly generated) # Normally we would get this values by evaluating solutions on a task. pop_size = 100 all_fitnesses = np.random.rand(pop_size,2) fronts = [] remaining_indicies = list(range(pop_size)) while True: non_dominated_indicies = [] for i in remaining_indicies: dominated = False for j in remaining_indicies: if dominates(all_fitnesses[j],all_fitnesses[i]): dominated = True break if dominated is False: non_dominated_indicies.append(i) print("Front: ",non_dominated_indicies) # remove current front from remaining indicies fronts.append(non_dominated_indicies) remaining_indicies = [x for x in remaining_indicies if x not in non_dominated_indicies] if len(remaining_indicies) == 0: print("Done") break # Let us plot the fronts for i in range(len(fronts)): # before plotting, sort by obj 1 sorted_front = sorted(fronts[i],key=lambda x : all_fitnesses[x,0]) plt.plot(all_fitnesses[sorted_front,0],all_fitnesses[sorted_front,1])#,".") plt.xlabel("obj1") plt.ylabel("obj2") ``` ### Non dominated sorting: fast approach (worst case complexity O(M N^2) ) ``` # The fast implementation of non dominated sort, as described in the original NSGA-II paper # We pre calculate the domination set for each individual (the set of other individuals this individual dominates) # 
And we precalculate the domination counts (how many other individuals dominates this individual) # Once we have these two things, calculating the fronts are simple: # - Current front is the individuals whose domination count is 0 # - Then visit everyone in the current front's domination set, and reduce their domination count. # - Remove current front and Repeat def calculate_pareto_fronts(fitnesses): # Calculate dominated set for each individual domination_sets = [] domination_counts = [] for fitnesses_1 in fitnesses: current_dimination_set = set() domination_counts.append(0) for i,fitnesses_2 in enumerate(fitnesses): if dominates(fitnesses_1,fitnesses_2): current_dimination_set.add(i) elif dominates(fitnesses_2,fitnesses_1): domination_counts[-1] += 1 domination_sets.append(current_dimination_set) domination_counts = np.array(domination_counts) fronts = [] while True: current_front = np.where(domination_counts==0)[0] if len(current_front) == 0: #print("Done") break #print("Front: ",current_front) fronts.append(current_front) for individual in current_front: domination_counts[individual] = -1 # this individual is already accounted for, make it -1 so ==0 will not find it anymore dominated_by_current_set = domination_sets[individual] for dominated_by_current in dominated_by_current_set: domination_counts[dominated_by_current] -= 1 return fronts # We use all_fitnesses defined in the previous example fronts = calculate_pareto_fronts(all_fitnesses) # Let us plot the fronts legends = [] for i in range(len(fronts)): # before plotting, sort by obj 1 sorted_front = sorted(fronts[i],key=lambda x : all_fitnesses[x,0]) plt.plot(all_fitnesses[sorted_front,0],all_fitnesses[sorted_front,1])#,".") plt.xlabel("obj1") plt.ylabel("obj2") ``` ### Diversity Preservation (crowding metric) ``` # An important aspect of multiobjective optimization is that we explore the different possible tradoffs. # To do this effectively we need to encourage a diversity in the population. 
# NSGA-II uses a crowding metric to do this. # The crowding metric is based on how close the neerest neighbors in the same front are for each objective. def calculate_crowding_metrics(fitnesses,fronts): num_objectives = fitnesses.shape[1] num_individuals = fitnesses.shape[0] # Normalise each objectives, so they are in the range [0,1] # This is necessary, so each objective's contribution have the same magnitude to the crowding metric. normalized_fitnesses = np.zeros_like(fitnesses) for objective_i in range(num_objectives): min_val = np.min(fitnesses[:,objective_i]) max_val = np.max(fitnesses[:,objective_i]) val_range = max_val - min_val normalized_fitnesses[:,objective_i] = (fitnesses[:,objective_i] - min_val) / val_range fitnesses = normalized_fitnesses crowding_metrics = np.zeros(num_individuals) for front in fronts: for objective_i in range(num_objectives): sorted_front = sorted(front,key = lambda x : fitnesses[x,objective_i]) crowding_metrics[sorted_front[0]] = np.inf crowding_metrics[sorted_front[-1]] = np.inf if len(sorted_front) > 2: for i in range(1,len(sorted_front)-1): crowding_metrics[sorted_front[i]] += fitnesses[sorted_front[i+1],objective_i] - fitnesses[sorted_front[i-1],objective_i] return crowding_metrics # Let us plot the crowding metric for the previous example crowding_metrics = calculate_crowding_metrics(all_fitnesses,fronts) crowding_metrics[crowding_metrics == np.inf] = np.max(crowding_metrics[crowding_metrics != np.inf]) # replace inf with max import seaborn for i in range(len(fronts)): # before plotting, sort by obj 1 sorted_front = sorted(fronts[i],key=lambda x : all_fitnesses[x,0]) plt.plot(all_fitnesses[sorted_front,0],all_fitnesses[sorted_front,1])#,".") seaborn.scatterplot(x=all_fitnesses[sorted_front,0],y=all_fitnesses[sorted_front,1],size=crowding_metrics[sorted_front],legend=False) plt.xlabel("obj1") plt.ylabel("obj2") plt.title("Point size indicates crowdedness (smaller more crowded)") ``` ### Sorting with domination and crowding ``` 
# For sorting the population we need both the nondomination rank and the crowding metric
# We always consider the nondomination rank first, but in a tie we use the crowding metric

# helper function
def fronts_to_nondomination_rank(fronts):
    # map each individual's index to the index of the front it belongs to (0 = best front)
    nondomination_rank_dict = {}
    for i,front in enumerate(fronts):
        for x in front:
            nondomination_rank_dict[x] = i
    return nondomination_rank_dict

def nondominated_sort(nondomination_rank_dict,crowding):
    # Sort all individual indices: primary key = nondomination rank (smaller is
    # better), tie-break = crowding metric (larger is better).
    # Returns indices in decreasing quality: the best individual comes first.
    num_individuals = len(crowding)
    indicies = list(range(num_individuals))

    def nondominated_compare(a,b):
        # returns 1 if a dominates b, or if they equal, but a is less crowded
        # return -1 if b dominates a, or if they equal, but b is less crowded
        # returns 0 if they are equal in every sense
        if nondomination_rank_dict[a] > nondomination_rank_dict[b]:  # domination rank, smaller better
            return -1
        elif nondomination_rank_dict[a] < nondomination_rank_dict[b]:
            return 1
        else:
            if crowding[a] < crowding[b]:   # crowding metrics, larger better
                return -1
            elif crowding[a] > crowding[b]:
                return 1
            else:
                return 0

    non_domiated_sorted_indicies = sorted(indicies,key = functools.cmp_to_key(nondominated_compare),reverse=True) # decreasing order, the best is the first
    return non_domiated_sorted_indicies

# Test
nondomination_rank_dict = fronts_to_nondomination_rank(fronts)
sorted_indicies = nondominated_sort(nondomination_rank_dict,crowding_metrics)
```

### Putting it all togeather

```
# Some generic GA functions

def touranment_selection(num_parents,num_offspring):
    # binary tournament: the parent list is already sorted best-first, so the
    # smaller of two random indices is the better contestant
    offspring_parents = []
    for _ in range(num_offspring):
        contestants = np.random.randint(0,num_parents,2) # generate 2 random numbers, take the smaller (parent list is already sorted, smaller index, better)
        winner = np.min(contestants)
        offspring_parents.append(winner)
    return offspring_parents

# simple mutation
def get_mutated_copy(parent,min_val,max_val,mutation_power_ratio):
    # Gaussian perturbation scaled to the gene range, clipped back into bounds
    mutation_power = (max_val - min_val) * mutation_power_ratio
    offspring = parent.copy()
    offspring += np.random.normal(0,mutation_power,size = offspring.shape)
    offspring = np.clip(offspring,min_val,max_val)
    return offspring

def NSGA2_create_next_generation(pop,fitnesses,config):
    # One NSGA-II generation step: rank -> sort -> keep best half -> breed.
    # algorithm and task parameters
    half_pop_size = config["half_pop_size"]
    problem_dim = config["problem_dim"]
    gene_min_val = config["gene_min_val"]
    gene_max_val = config["gene_max_val"]
    mutation_power_ratio = config["mutation_power_ratio"]

    # calculate the pareto fronts and crowding metrics
    fronts = calculate_pareto_fronts(fitnesses)
    nondomination_rank_dict = fronts_to_nondomination_rank(fronts)
    crowding = calculate_crowding_metrics(fitnesses,fronts)

    # Sort the population
    non_domiated_sorted_indicies = nondominated_sort(nondomination_rank_dict,crowding)

    # The better half of the population survives to the next generation and have a chance to reproduce
    # The rest of the population is discarded
    surviving_individuals = pop[non_domiated_sorted_indicies[:half_pop_size]]
    #print(len(surviving_individuals))
    reproducing_individual_indicies = touranment_selection(num_parents=half_pop_size,num_offspring=half_pop_size)
    offsprings = np.array([get_mutated_copy(surviving_individuals[i],gene_min_val,gene_max_val,mutation_power_ratio) for i in reproducing_individual_indicies])

    new_pop = np.concatenate([surviving_individuals,offsprings]) # concatenate the 2 lists
    return new_pop
```

### Solving the toy problem

```
def simple_1d_fitness_func(x):
    # same two objectives as the earlier obj1/obj2, vectorized over x
    objective_1 = 1-(x * x)
    objective_2 = 1-((x-0.5) * (x-0.5))
    return np.stack([objective_1,objective_2],axis=1)

config = {
    "half_pop_size" : 20,
    "problem_dim" : 1,
    "gene_min_val" : -1,
    "gene_max_val" : 1,
    "mutation_power_ratio" : 0.05,
}

pop = np.random.uniform(config["gene_min_val"],config["gene_max_val"],2*config["half_pop_size"])

mean_fitnesses = []
for generation in range(30):
    # evaluate pop
    fitnesses = simple_1d_fitness_func(pop)
    mean_fitnesses.append(np.mean(fitnesses,axis=0))

    # transition to next generation
    pop = NSGA2_create_next_generation(pop,fitnesses,config)

#
Check if we found the same solutions as the brute force method x = np.linspace(-1,1,100) all_solutions_fitnesses = simple_1d_fitness_func(x) plt.plot(all_solutions_fitnesses[:,0],all_solutions_fitnesses[:,1]) plt.plot(fitnesses[:config["half_pop_size"],0],fitnesses[:config["half_pop_size"],1],".",color="red") plt.xlabel("obj1") plt.ylabel("obj2") ``` ### Solving a more complex problem ``` # This is one of the problems from the original paper: FON def FON_fitness_func(x): val = 1 / np.sqrt(3) objective_1 = np.exp( - ( (x[:,0]-val)*(x[:,0]-val) + (x[:,1]-val)*(x[:,1]-val) + (x[:,2]-val)*(x[:,2]-val))) objective_2 = np.exp( - ( (x[:,0]+val)*(x[:,0]+val) + (x[:,1]+val)*(x[:,1]+val) + (x[:,2]+val)*(x[:,2]+val))) return np.stack([objective_1,objective_2],axis=1) config = { "half_pop_size" : 20, "problem_dim" : 3, "gene_min_val" : -4, "gene_max_val" : 4, "mutation_power_ratio" : 0.05, } pop = np.random.uniform(config["gene_min_val"],config["gene_max_val"],(2*config["half_pop_size"],3)) mean_fitnesses = [] for generation in range(50): # evaluate pop fitnesses = FON_fitness_func(pop) mean_fitnesses.append(np.mean(fitnesses,axis=0)) # transition to next generation pop = NSGA2_create_next_generation(pop,fitnesses,config) # To compare the found solutions with other values, we check all solutions in a grid grid_3d = [] for x in np.linspace(-2,2,20): for y in np.linspace(-2,2,20): for z in np.linspace(-2,2,20): grid_3d.append([x,y,z]) grid_3d = np.array(grid_3d) all_fitnesses = FON_fitness_func(grid_3d) plt.plot(all_fitnesses[:,0],all_fitnesses[:,1],".") plt.plot(fitnesses[:config["half_pop_size"],0],fitnesses[:config["half_pop_size"],1],".",color="red") plt.xlabel("obj1") plt.ylabel("obj2") plt.legend(["grid solutions","evolved solutions"]) ``` ### Making it production ready ``` # If the population size is large, the computation can be quite slow. 
# The slowest part is calulating the fronts, which incudes a nested for loops to check who dominates who pop_size = 2000 all_fitnesses = np.random.rand(pop_size,2) %%time fronts = calculate_pareto_fronts(all_fitnesses) # To speed up this nested for loop, we can vectorize it with numpy. # This means instead of calulating values one by one in a python loop, we calculate them all at once with numpy. # There still going to be a loop inside numpy, but that loop is written in C, and will run much faster. # To do this, we precalculate the domination matrix, so we can replace: # dominates(fitnesses[i],fitnesses[i]) with domination_matrix[i,j] def calculate_domination_matrix(fitnesses): pop_size = fitnesses.shape[0] num_objectives = fitnesses.shape[1] fitness_grid_x = np.zeros([pop_size,pop_size,num_objectives]) fitness_grid_y = np.zeros([pop_size,pop_size,num_objectives]) for i in range(pop_size): fitness_grid_x[i,:,:] = fitnesses[i] fitness_grid_y[:,i,:] = fitnesses[i] larger_or_equal = fitness_grid_x >= fitness_grid_y larger = fitness_grid_x > fitness_grid_y return np.logical_and(np.all(larger_or_equal,axis=2),np.any(larger,axis=2)) def fast_calculate_pareto_fronts(fitnesses): # Calculate dominated set for each individual domination_sets = [] domination_counts = [] domination_matrix = calculate_domination_matrix(fitnesses) pop_size = fitnesses.shape[0] for i in range(pop_size): current_dimination_set = set() domination_counts.append(0) for j in range(pop_size): if domination_matrix[i,j]: current_dimination_set.add(j) elif domination_matrix[j,i]: domination_counts[-1] += 1 domination_sets.append(current_dimination_set) domination_counts = np.array(domination_counts) fronts = [] while True: current_front = np.where(domination_counts==0)[0] if len(current_front) == 0: #print("Done") break #print("Front: ",current_front) fronts.append(current_front) for individual in current_front: domination_counts[individual] = -1 # this individual is already accounted for, make it -1 so 
==0 will not find it anymore dominated_by_current_set = domination_sets[individual] for dominated_by_current in dominated_by_current_set: domination_counts[dominated_by_current] -= 1 return fronts %%time fronts = fast_calculate_pareto_fronts(all_fitnesses) # The new version is around 24 times faster on my machine ```
github_jupyter
# Continuous Control --- In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. ### 1. Start the Environment We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). ``` from unityagents import UnityEnvironment import numpy as np ``` Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. - **Mac**: `"path/to/Reacher.app"` - **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"` - **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"` - **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"` - **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"` - **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"` - **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"` For instance, if you are using a Mac, then you downloaded `Reacher.app`. If this file is in the same folder as the notebook, then the line below should appear as follows: ``` env = UnityEnvironment(file_name="Reacher.app") ``` ``` env = UnityEnvironment(file_name='Reacher.exe') ``` Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. ``` # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] ``` ### 2. Examine the State and Action Spaces In this environment, a double-jointed arm can move to target locations. 
A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible. The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`. Run the code cell below to print some information about the environment. ``` # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) ``` ### 3. Take Random Actions in the Environment In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment. Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment! 
``` env_info = env.reset(train_mode=False)[brain_name] # reset the environment states = env_info.vector_observations # get the current state (for each agent) scores = np.zeros(num_agents) # initialize the score (for each agent) while True: actions = np.random.randn(num_agents, action_size) # select an action (for each agent) actions = np.clip(actions, -1, 1) # all actions between -1 and 1 env_info = env.step(actions)[brain_name] # send all actions to tne environment next_states = env_info.vector_observations # get next state (for each agent) rewards = env_info.rewards # get reward (for each agent) dones = env_info.local_done # see if episode finished scores += env_info.rewards # update the score (for each agent) states = next_states # roll over states to next time step if np.any(dones): # exit loop if episode finished break print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores))) ``` When finished, you can close the environment. ``` env.close() ``` ### 4. It's Your Turn! Now it's your turn to train your own agent to solve the environment! 
When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following: ```python env_info = env.reset(train_mode=True)[brain_name] ``` ### Import agent ``` # import Agent from ddpg_agent from collections import deque import matplotlib.pyplot as plt %matplotlib inline import torch from ddpg_agent import Agent ``` ### Implement DDPG train function The agent will be trained until the average score over 20 agents is at least more than 30 ``` def ddpg(n_episodes=300, max_t=1000, print_every=100): """ Agent train function Arguments: n_episodes - Number of episode, default is 300 max_t - max time step, 1000 print_every - every print_every step to print an average score """ scores_deque = deque(maxlen=print_every) scores, ma_scores = [], [] for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] agent.reset() states = env_info.vector_observations # get the current state episode_scores = np.zeros(num_agents) for t in range(max_t): actions = agent.act(states) # select an action env_info = env.step(actions)[brain_name] # send the action to the environment next_states = env_info.vector_observations # get the next state rewards = env_info.rewards # get the reward dones = env_info.local_done # see if episode has finished agent.step(states, actions, rewards, next_states, dones) # take step with agent (including learning) episode_scores += rewards # update the score states = next_states # roll over the state to next time step if np.any(dones): # exit loop if episode finished break scores_deque.append(np.mean(episode_scores)) # save most recent score scores.append(np.mean(episode_scores)) ma_scores.append(np.mean(scores_deque)) # moving average print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end="") torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth') if i_episode % 10 == 
0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) if np.mean(scores_deque) >=30.0 : print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth') torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth') break return scores, ma_scores ``` ### Create an Agent and train Create an agent with input state, action, number of agents value.<br> n_episodes, max_t, print_every input argurments are configurable ``` # Create Agent random_seed = 10 agent = Agent(state_size=state_size, action_size=action_size, num_agents=num_agents, random_seed=random_seed) scores, ma_scores = ddpg(n_episodes=200, print_every=100) ``` ### 8. Plot average score Check how many episodes are need to meet performance threshold, +30 average score over 100 episodes And you can check overall average score values per episodes. ``` # plot reward, avg score fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(1, len(scores)+1), scores, label='scores per episode') plt.plot(np.arange(1, len(scores)+1), ma_scores, c='r', label='avg score over 100 episodes') plt.ylabel('Score') plt.xlabel('Episode #') plt.legend(loc='lower right') plt.show() # Environment Close env.close() ```
github_jupyter
# Módulo 4: APIs

## Spotify

<img src="https://developer.spotify.com/assets/branding-guidelines/logo@2x.png" width=400/>

En este módulo utilizaremos APIs para obtener información sobre artistas, discos y tracks disponibles en Spotify.

Pero primero... ¿Qué es una **API**?<br>
Por sus siglas en inglés, una API es una interfaz para programar aplicaciones (*Application Programming Interface*). Es decir que es un conjunto de funciones, métodos, reglas y definiciones que nos permitirán desarrollar aplicaciones (en este caso un scraper) que se comuniquen con los servidores de Spotify. Las APIs son diseñadas y desarrolladas por las empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que utilicen sus servicios. Spotify tiene APIs públicas y bien documentadas que estaremos usando en el desarrollo de este proyecto.

#### REST

Un término que seguramente te vas a encontrar cuando estés buscando información en internet es **REST** o *RESTful*. Significa *representational state transfer* y si una API es REST o RESTful, implica que respeta unos determinados principios de arquitectura, como por ejemplo un protocolo de comunicación cliente/servidor (que será HTTP) y (entre otras cosas) un conjunto de operaciones definidas que conocemos como **métodos**. Ya veníamos usando el método GET para hacer solicitudes a servidores web.

#### Documentación

Como mencioné antes, las APIs son diseñadas por las mismas empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que consuman sus servicios o información. Es por eso que la forma de utilizar las APIs variará dependiendo del servicio que queramos consumir. No es lo mismo utilizar las APIs de Spotify que las APIs de Twitter. Por esta razón es de suma importancia leer la documentación disponible, generalmente en la sección de desarrolladores de cada sitio.
Te dejo el [link a la de Spotify](https://developer.spotify.com/documentation/)

#### JSON

Json significa *JavaScript Object Notation* y es un formato para describir objetos que ganó tanta popularidad en su uso que ahora se lo considera independiente del lenguaje. De hecho, lo utilizaremos en este proyecto por más que estemos trabajando en Python, porque es la forma en la que obtendremos las respuestas a las solicitudes que realicemos utilizando las APIs. Para nosotros, no será ni más ni menos que un diccionario con algunas particularidades que iremos viendo a lo largo del curso.

Links útiles para la clase:
- [Documentación de Spotify - Artistas](https://developer.spotify.com/documentation/web-api/reference/artists/)
- [Iron Maiden en Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao)

```
import requests

id_im = '6mdiAmATAx73kdxrNrnlao'
url_base = 'https://api.spotify.com/v1'
ep_artist = '/artists/{artist_id}'
url_base+ep_artist.format(artist_id=id_im)

# Unauthenticated request — the API rejects it (non-200 status below).
r = requests.get(url_base+ep_artist.format(artist_id=id_im))
r.status_code
r.json()

# Request an access token via the client-credentials flow.
# SECURITY NOTE(review): the Basic value below embeds a base64-encoded
# client_id:client_secret in plaintext — credentials committed like this
# should be revoked and loaded from the environment instead.
token_url = 'https://accounts.spotify.com/api/token'
params = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='}
r = requests.post(token_url, data=params, headers=headers)
r.status_code
r.json()
token = r.json()['access_token']
token

# Retry the artist lookup with the bearer token.
header = {"Authorization": "Bearer {}".format(token)}
r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)
r.status_code
r.json()

# Search endpoint: find artists matching a query, restricted to a market.
url_busqueda = 'https://api.spotify.com/v1/search'
search_params = {'q': "Iron+Maiden", 'type':'artist', 'market':'AR'}
busqueda = requests.get(url_busqueda, headers=header, params=search_params)
busqueda.status_code
busqueda.json()

import pandas as pd
df = pd.DataFrame(busqueda.json()['artists']['items'])
df.head()
# Keep the id of the most popular match.
df.sort_values(by='popularity', ascending=False).iloc[0]['id']

import base64

def get_token(client_id, client_secret):
    """Request a client-credentials access token from the Spotify accounts API.

    Returns the token string, or None (after printing the error payload)
    when the request does not return HTTP 200.
    """
    encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8'))
    params = {'grant_type':'client_credentials'}
    header={'Authorization': 'Basic ' + str(encoded, 'utf-8')}
    r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params)
    if r.status_code != 200:
        print('Error en la request.', r.json())
        return None
    print('Token válido por {} segundos.'.format(r.json()['expires_in']))
    return r.json()['access_token']

# SECURITY NOTE(review): hard-coded credentials — move to environment variables.
client_id = '44b7b36ec145467f9a9eeaf7e417cf8b'
client_secret = '7b4aa7a0ef4844048ab1d22430a1eb1f'
```

## Clase 4
github_jupyter
<a href="https://colab.research.google.com/github/cxbxmxcx/Evolutionary-Deep-Learning/blob/main/EDL_2_Options.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Setup ``` #@title Defining Imports #numpy import numpy as np #PyTorch import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import torch.optim as optim from torch.utils.data import TensorDataset, DataLoader #plotting from matplotlib import pyplot as plt #for performance timing import time results = [] #@title Setting Hyperparameters hp_test = "MSE Loss " #@param {type:"string"} learning_rate = 3.5e-03 epochs = 500 middle_layer = 25 batch_size = 5 data_step = 1 ``` # Creating Data ``` #@title Creating data from function and reshaping def function(X): return X * X + 5. X = np.reshape(np.arange(0,10, data_step), (-1, 1)) y = function(X) inputs = X.shape[1] y = y.reshape(-1, 1) plt.plot(X, y, 'o', color='black') tensor_x = torch.Tensor(X) # transform to torch tensor tensor_y = torch.Tensor(y) dataset = TensorDataset(tensor_x,tensor_y) # create your datset dataloader = DataLoader(dataset, batch_size= batch_size, shuffle=True) # create your dataloader ``` # Building the Model ``` #@title Define the Model class Net(nn.Module): def __init__(self, inputs, middle): super().__init__() self.fc1 = nn.Linear(inputs,middle) self.fc2 = nn.Linear(middle,middle) #self.fc3 = nn.Linear(middle, middle) self.out = nn.Linear(middle,1) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) #x = F.relu(self.fc3(x)) x = self.out(x) return x #@title Create the model model = Net(inputs, middle_layer) print(model) #@title Define loss function (criterion) and optimizer loss_fn = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=learning_rate) #@title Training the Model history = [] start = time.time() for i in range(epochs): for X, y in iter(dataloader): # wrap the data in variables x_batch = 
Variable(torch.Tensor(X)) y_batch = Variable(torch.Tensor(y)) # forward pass y_pred = model(x_batch) # compute and print loss loss = loss_fn(y_pred, y_batch) history.append(loss.data) # reset gradients optimizer.zero_grad() # backwards pass loss.backward() # step the optimizer - update the weights optimizer.step() print(f"[{i}] Loss = {loss.data}") plt.plot(history) end = time.time() - start X_a = torch.rand(100,1).clone() * 10 y_a = model(X_a) y_a = y_a.detach().numpy() results.append([hp_test,end, X_a, y_a]) fig = plt.figure() ax1 = fig.add_subplot(111) for test,t,x,y in results: ax1.scatter(x, y, s=10, marker="s", label=f"{test} in {t:0.1f}s") plt.legend(loc='upper left'); plt.show() ```
github_jupyter
Ejercicio del set de imágenes CIFAR10- Clasificando imágenes diversas - PYTORCH - Andrés de la Rosa ``` #Importacion de todos los modulos import time import matplotlib.pyplot as plt import numpy as np from keras.models import Sequential from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense from keras.layers import Dropout from keras.layers import Flatten from keras.constraints import maxnorm from keras.optimizers import SGD from keras.layers import Activation from keras.layers.convolutional import Conv2D from keras.layers.convolutional import MaxPooling2D from keras.layers.normalization import BatchNormalization from keras.utils import np_utils from keras import backend as K import tensorflow as tf import multiprocessing as mp from keras.datasets import cifar10 import os ``` Esta vez cargo el CIFAR10 directamente de los datasets de Keras y no de la pagina de donde estaban originalmente como hice en el ejercicio de TensorFlow. Esto facilitó mucho el trabajo debido a que no tuve que definir las funciones que hacian el preprocesamiento de las imágenes. 
teniendo en cuenta que al momento de utilizar mis propias imágenes tendré que hacer dicho pre procesamiento ``` batch_size = 32 num_classes = 10 epochs = 5 (x_train, y_train), (x_test, y_test) = cifar10.load_data() # x_train - training data(images), y_train - labels(digits) print(x_train.shape[0], 'imagenes de entrenamiento') print(x_test.shape[0], 'imagenes de test') #Convirtiendo a one hot encoding y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) x_train = x_train.astype('float32') x_test = x_test.astype('float32') #Normalizando la entrada x_train /= 255 x_test /= 255 #Definiendo el modelo de 3 capas con relu y max pooling, haciendo el same padding para no perder pixeles model = Sequential() #Primera capa con relu y maxpooling model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) #Segunda capa con relu y maxpooling model.add(Conv2D(64, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) #Tercera capa con relu y maxpooling model.add(Conv2D(128, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) #Poniendo los datos en formato flat model.add(Flatten()) #Finalizando con los Fully connected layers model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.3)) model.add(Dense(num_classes)) # Compilamos el modelo para calcular su acierto model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) opt = SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=False) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2, shuffle=True) ```
github_jupyter
``` import pickle as pk import matplotlib.pyplot as plt import os import numpy as np result_file_low = 'results_100.0.p' result_file_high = 'results_0.01.p' with open(result_file_low, 'rb') as f: results_low = pk.load(f, encoding='latin1') with open(result_file_high, 'rb') as f: results_high = pk.load(f, encoding='latin1') def histo_plot(filname, title, list_histo): bins = 50 alpha_val = 0.4 n, bins, patches = plt.hist([i[0] for i in list_histo], bins,color=[i[1] for i in list_histo] , alpha=alpha_val) # for histo in list_histo: # n, bins, patches = plt.hist(histo[0], bins,color=histo[1] , alpha=alpha_val) plt.ylabel('Count') plt.title(title) plt.grid(True) plt.savefig(os.path.join('histograms',filname)) # plt.show() plt.close() for result_index, filename, title in [(0,'Erdos_Renyi_high.png','Erdos-Renyi'), (1,"Pref_Attachment_high.png","Pref. Attachment"), (2,"Random_high.png","Random")]: resuls_dit = results_high['results'][result_index] list_histo = [(resuls_dit['random_leverage'], 'g'),(resuls_dit['uniform_random'], 'b'), (resuls_dit['greedy'], 'r'),(resuls_dit['deterministic'], 'k')] histo_plot(filename,title, list_histo) for result_index, filename, title in [ (0,'Erdos_Renyi_low.png','Erdos-Renyi'), (1,"Pref_Attachment_low.png","Pref. 
Attachment"), (2,"Random_low.png","Random")]: resuls_dit = results_low['results'][result_index] list_histo = [(resuls_dit['random_leverage'], 'g'),(resuls_dit['deterministic'], 'k'),(resuls_dit['uniform_random'], 'b'), (resuls_dit['greedy'], 'r')] histo_plot(filename,title, list_histo) print(len(results_low['results'][0]['random_leverage'])) import numpy as np import matplotlib.pyplot as plt n_bins = 25 alpha_val = 0.4 fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True) ax1.set_aspect(1) ax1.set_ylabel('Count') ax1.hist(a[0]['greedy'], bins=n_bins, alpha=alpha_val, color='r') ax1.hist(a[0]['deterministic'], bins=n_bins, alpha=alpha_val,color='k') ax1.hist(a[0]['random_leverage'], bins=n_bins,alpha=alpha_val, color='g') ax1.hist(a[0]['uniform_random'], bins=n_bins,alpha=alpha_val, color='b') ax1.set_title("Erdos-Renyi") ax2.set_aspect(1) ax2.set_ylabel('Count') ax2.hist(a[1]['greedy'], bins=n_bins, alpha=alpha_val, color='r') ax2.hist(a[1]['deterministic'], bins=n_bins, alpha=alpha_val,color='k') ax2.hist(a[1]['random_leverage'], bins=n_bins,alpha=alpha_val, color='g') ax2.hist(a[1]['uniform_random'], bins=n_bins,alpha=alpha_val, color='b') ax2.set_title("Pref. Attachment") ax3.set_aspect(1) ax3.set_title("Random") # ax2.title.set_rasterized(True) # should display a warning fig.tight_layout() plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=3, hspace=0.5, wspace=0.3) plt.savefig("test_rasterization.png", dpi=150) # if not plt.rcParams["text.usetex"]: # plt.savefig("test_rasterization.svg", dpi=150) # # svg backend currently ignores the dpi ```
github_jupyter
# Basic Text Classification ## Overview This notebook demonstrates text classification starting from plain text files stored on disk. You'll train a binary classifier to perform sentiment analysis on an IMDB dataset. At the end of the notebook, there is an exercise for you to try, in which you'll train a multi-class classifier to predict the tag for a programming question on Stack Overflow. ## Learning Objective In this notebook, you learn how to: 1. Prepare the dataset for training 2. Use loss function and optimizer 3. Train the model 4. Evaluate the model 5. Export the model ## Introduction This notebook shows how to train a sentiment analysis model to classify movie reviews as positive or negative, based on the text of the review. Each learning objective will correspond to a __#TODO__ in the student lab notebook -- try to complete the [target notebook](../labs/text_classification.ipynb) first and then review this solution notebook. ``` # Import necessary libraries import matplotlib.pyplot as plt import os import re import shutil import string import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import losses # Print the TensorFlow version print(tf.__version__) ``` ## Sentiment analysis This notebook trains a sentiment analysis model to classify movie reviews as *positive* or *negative*, based on the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem. You'll use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. 
### Download and explore the IMDB dataset Let's download and extract the dataset, then explore the directory structure. ``` # Download the IMDB dataset url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz" dataset = tf.keras.utils.get_file("aclImdb_v1", url, untar=True, cache_dir='.', cache_subdir='') dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb') # Explore the dataset os.listdir(dataset_dir) train_dir = os.path.join(dataset_dir, 'train') os.listdir(train_dir) ``` The `aclImdb/train/pos` and `aclImdb/train/neg` directories contain many text files, each of which is a single movie review. Let's take a look at one of them. ``` # Print the file content sample_file = os.path.join(train_dir, 'pos/1181_9.txt') with open(sample_file) as f: print(f.read()) ``` ### Load the dataset Next, you will load the data off disk and prepare it into a format suitable for training. To do so, you will use the helpful [text_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text_dataset_from_directory) utility, which expects a directory structure as follows. ``` main_directory/ ...class_a/ ......a_text_1.txt ......a_text_2.txt ...class_b/ ......b_text_1.txt ......b_text_2.txt ``` To prepare a dataset for binary classification, you will need two folders on disk, corresponding to `class_a` and `class_b`. These will be the positive and negative movie reviews, which can be found in `aclImdb/train/pos` and `aclImdb/train/neg`. As the IMDB dataset contains additional folders, you will remove them before using this utility. ``` remove_dir = os.path.join(train_dir, 'unsup') shutil.rmtree(remove_dir) ``` Next, you will use the `text_dataset_from_directory` utility to create a labeled `tf.data.Dataset`. [tf.data](https://www.tensorflow.org/guide/data) is a powerful collection of tools for working with data. 
When running a machine learning experiment, it is a best practice to divide your dataset into three splits: [train](https://developers.google.com/machine-learning/glossary#training_set), [validation](https://developers.google.com/machine-learning/glossary#validation_set), and [test](https://developers.google.com/machine-learning/glossary#test-set). The IMDB dataset has already been divided into train and test, but it lacks a validation set. Let's create a validation set using an 80:20 split of the training data by using the `validation_split` argument below. ``` # Create the validation set batch_size = 32 seed = 42 raw_train_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='training', seed=seed) ``` As you can see above, there are 25,000 examples in the training folder, of which you will use 80% (or 20,000) for training. As you will see in a moment, you can train a model by passing a dataset directly to `model.fit`. If you're new to `tf.data`, you can also iterate over the dataset and print out a few examples as follows. ``` # Print few examples for text_batch, label_batch in raw_train_ds.take(1): for i in range(3): print("Review", text_batch.numpy()[i]) print("Label", label_batch.numpy()[i]) ``` Notice the reviews contain raw text (with punctuation and occasional HTML tags like `<br/>`). You will show how to handle these in the following section. The labels are 0 or 1. To see which of these correspond to positive and negative movie reviews, you can check the `class_names` property on the dataset. ``` print("Label 0 corresponds to", raw_train_ds.class_names[0]) print("Label 1 corresponds to", raw_train_ds.class_names[1]) ``` Next, you will create a validation and test dataset. You will use the remaining 5,000 reviews from the training set for validation. 
Note: When using the `validation_split` and `subset` arguments, make sure to either specify a random seed, or to pass `shuffle=False`, so that the validation and training splits have no overlap. ``` raw_val_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='validation', seed=seed) raw_test_ds = tf.keras.utils.text_dataset_from_directory( 'aclImdb/test', batch_size=batch_size) ``` ### Prepare the dataset for training Next, you will standardize, tokenize, and vectorize the data using the helpful `tf.keras.layers.TextVectorization` layer. Standardization refers to preprocessing the text, typically to remove punctuation or HTML elements to simplify the dataset. Tokenization refers to splitting strings into tokens (for example, splitting a sentence into individual words, by splitting on whitespace). Vectorization refers to converting tokens into numbers so they can be fed into a neural network. All of these tasks can be accomplished with this layer. As you saw above, the reviews contain various HTML tags like `<br />`. These tags will not be removed by the default standardizer in the `TextVectorization` layer (which converts text to lowercase and strips punctuation by default, but doesn't strip HTML). You will write a custom standardization function to remove the HTML. Note: To prevent [training-testing skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew) (also known as training-serving skew), it is important to preprocess the data identically at train and test time. To facilitate this, the `TextVectorization` layer can be included directly inside your model, as shown later in this tutorial. 
``` def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ') return tf.strings.regex_replace(stripped_html, '[%s]' % re.escape(string.punctuation), '') ``` Next, you will create a `TextVectorization` layer. You will use this layer to standardize, tokenize, and vectorize our data. You set the `output_mode` to `int` to create unique integer indices for each token. Note that you're using the default split function, and the custom standardization function you defined above. You'll also define some constants for the model, like an explicit maximum `sequence_length`, which will cause the layer to pad or truncate sequences to exactly `sequence_length` values. ``` max_features = 10000 sequence_length = 250 # TODO # Created the TextVectorization layer vectorize_layer = layers.TextVectorization( standardize=custom_standardization, max_tokens=max_features, output_mode='int', output_sequence_length=sequence_length) ``` Next, you will call `adapt` to fit the state of the preprocessing layer to the dataset. This will cause the model to build an index of strings to integers. Note: It's important to only use your training data when calling adapt (using the test set would leak information). ``` # Make a text-only dataset (without labels), then call adapt train_text = raw_train_ds.map(lambda x, y: x) vectorize_layer.adapt(train_text) ``` Let's create a function to see the result of using this layer to preprocess some data. 
``` def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label # retrieve a batch (of 32 reviews and labels) from the dataset text_batch, label_batch = next(iter(raw_train_ds)) first_review, first_label = text_batch[0], label_batch[0] print("Review", first_review) print("Label", raw_train_ds.class_names[first_label]) print("Vectorized review", vectorize_text(first_review, first_label)) ``` As you can see above, each token has been replaced by an integer. You can lookup the token (string) that each integer corresponds to by calling `.get_vocabulary()` on the layer. ``` # Print the token (string) that each integer corresponds print("1287 ---> ",vectorize_layer.get_vocabulary()[1287]) print(" 313 ---> ",vectorize_layer.get_vocabulary()[313]) print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary()))) ``` You are nearly ready to train your model. As a final preprocessing step, you will apply the TextVectorization layer you created earlier to the train, validation, and test dataset. ``` # Apply the TextVectorization layer you created earlier to the train, validation, and test dataset train_ds = raw_train_ds.map(vectorize_text) val_ds = raw_val_ds.map(vectorize_text) test_ds = raw_test_ds.map(vectorize_text) ``` ### Configure the dataset for performance These are two important methods you should use when loading data to make sure that I/O does not become blocking. `.cache()` keeps data in memory after it's loaded off disk. This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache, which is more efficient to read than many small files. `.prefetch()` overlaps data preprocessing and model execution while training. You can learn more about both methods, as well as how to cache data to disk in the [data performance guide](https://www.tensorflow.org/guide/data_performance). 
``` AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) ``` ### Create the model It's time to create your neural network: ``` embedding_dim = 16 # Create your neural network model = tf.keras.Sequential([ layers.Embedding(max_features + 1, embedding_dim), layers.Dropout(0.2), layers.GlobalAveragePooling1D(), layers.Dropout(0.2), layers.Dense(1)]) model.summary() ``` The layers are stacked sequentially to build the classifier: 1. The first layer is an `Embedding` layer. This layer takes the integer-encoded reviews and looks up an embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`. To learn more about embeddings, check out the [Word embeddings](https://www.tensorflow.org/text/guide/word_embeddings) tutorial. 2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible. 3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units. 4. The last layer is densely connected with a single output node. ### Loss function and optimizer A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), you'll use `losses.BinaryCrossentropy` loss function. 
Now, configure the model to use an optimizer and a loss function: ``` # TODO # Configure the model to use an optimizer and a loss function model.compile(loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=tf.metrics.BinaryAccuracy(threshold=0.0)) ``` ### Train the model You will train the model by passing the `dataset` object to the fit method. ``` # TODO # Train the model epochs = 10 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs) ``` ### Evaluate the model Let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy. ``` # TODO # Evaluate the model loss, accuracy = model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: ", accuracy) ``` This fairly naive approach achieves an accuracy of about 86%. ### Create a plot of accuracy and loss over time `model.fit()` returns a `History` object that contains a dictionary with everything that happened during training: ``` history_dict = history.history history_dict.keys() ``` There are four entries: one for each monitored metric during training and validation. 
You can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy: ``` # Plot the loss over time acc = history_dict['binary_accuracy'] val_acc = history_dict['val_binary_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'bo', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # Plot the accuracy over time plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.show() ``` In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy. Notice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration. This isn't the case for the validation loss and accuracy—they seem to peak before the training accuracy. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data. For this particular case, you could prevent overfitting by simply stopping the training when the validation accuracy is no longer increasing. One way to do so is to use the `tf.keras.callbacks.EarlyStopping` callback. ## Export the model In the code above, you applied the `TextVectorization` layer to the dataset before feeding text to the model. 
If you want to make your model capable of processing raw strings (for example, to simplify deploying it), you can include the `TextVectorization` layer inside your model. To do so, you can create a new model using the weights you just trained. ``` # TODO # Export the model export_model = tf.keras.Sequential([ vectorize_layer, model, layers.Activation('sigmoid') ]) export_model.compile( loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy'] ) # Test it with `raw_test_ds`, which yields raw strings loss, accuracy = export_model.evaluate(raw_test_ds) print(accuracy) ``` ### Inference on new data To get predictions for new examples, you can simply call `model.predict()`. ``` examples = [ "The movie was great!", "The movie was okay.", "The movie was terrible..." ] export_model.predict(examples) ``` Including the text preprocessing logic inside your model enables you to export a model for production that simplifies deployment, and reduces the potential for [train/test skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew). There is a performance difference to keep in mind when choosing where to apply your TextVectorization layer. Using it outside of your model enables you to do asynchronous CPU processing and buffering of your data when training on GPU. So, if you're training your model on the GPU, you probably want to go with this option to get the best performance while developing your model, then switch to including the TextVectorization layer inside your model when you're ready to prepare for deployment. Visit this [tutorial](https://www.tensorflow.org/tutorials/keras/save_and_load) to learn more about saving models. ## Exercise: multi-class classification on Stack Overflow questions This tutorial showed how to train a binary classifier from scratch on the IMDB dataset. 
As an exercise, you can modify this notebook to train a multi-class classifier to predict the tag of a programming question on [Stack Overflow](http://stackoverflow.com/). A [dataset](https://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz) has been prepared for you to use containing the body of several thousand programming questions (for example, "How can I sort a dictionary by value in Python?") posted to Stack Overflow. Each of these is labeled with exactly one tag (either Python, CSharp, JavaScript, or Java). Your task is to take a question as input, and predict the appropriate tag, in this case, Python. The dataset you will work with contains several thousand questions extracted from the much larger public Stack Overflow dataset on [BigQuery](https://console.cloud.google.com/marketplace/details/stack-exchange/stack-overflow), which contains more than 17 million posts. After downloading the dataset, you will find it has a similar directory structure to the IMDB dataset you worked with previously: ``` train/ ...python/ ......0.txt ......1.txt ...javascript/ ......0.txt ......1.txt ...csharp/ ......0.txt ......1.txt ...java/ ......0.txt ......1.txt ``` Note: To increase the difficulty of the classification problem, occurrences of the words Python, CSharp, JavaScript, or Java in the programming questions have been replaced with the word *blank* (as many questions contain the language they're about). To complete this exercise, you should modify this notebook to work with the Stack Overflow dataset by making the following modifications: 1. At the top of your notebook, update the code that downloads the IMDB dataset with code to download the [Stack Overflow dataset](https://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz) that has already been prepared. As the Stack Overflow dataset has a similar directory structure, you will not need to make many modifications. 1. 
Modify the last layer of your model to `Dense(4)`, as there are now four output classes. 1. When compiling the model, change the loss to `tf.keras.losses.SparseCategoricalCrossentropy`. This is the correct loss function to use for a multi-class classification problem, when the labels for each class are integers (in this case, they can be 0, *1*, *2*, or *3*). In addition, change the metrics to `metrics=['accuracy']`, since this is a multi-class classification problem (`tf.metrics.BinaryAccuracy` is only used for binary classifiers). 1. When plotting accuracy over time, change `binary_accuracy` and `val_binary_accuracy` to `accuracy` and `val_accuracy`, respectively. 1. Once these changes are complete, you will be able to train a multi-class classifier. ## Learning more This tutorial introduced text classification from scratch. To learn more about the text classification workflow in general, check out the [Text classification guide](https://developers.google.com/machine-learning/guides/text-classification/) from Google Developers.
github_jupyter
``` import numpy as np import pandas as pd # PyTorch - to build neural network import torch # interactive plotting by Bokeh from bokeh.plotting import figure from bokeh.io import show, output_notebook, push_notebook # pretty progress by tqdm from tqdm import tnrange ``` We're going to use PyViz's [Bokeh](https://bokeh.pydata.org/en/latest/) to build interactive plots in this notebook. Bokeh uses a js kernel to serve data to the browser, so we need to initialize it. ``` output_notebook() ``` # Import data and shape it for ML Let's start by importing the .csv we wrote out in `data_prep.ipynb`. We'll then shuffle the data **(WHY?)**. Then we'll peel the labels off of the features **(AGAIN, WHY?)**. ``` # read in .csv file as a Pandas DataFrame rocks = pd.read_csv('../dat/csv/clean_rocks.csv') # break out the names of the features and rocks for plotting chemical_names = list(rocks.columns) rock_names = ['andesite', 'basalt', 'carbonatite', 'kimberlite', 'rhyolite'] # cast the Pandas dataframe as an array, for use with PyTorch rocks = rocks.as_matrix() print('import shape:', rocks.shape, '\n') # randomly shuffle the samples (rows) so we don't overcondition # the network by training on one rock type at a time np.random.shuffle(rocks) # split off the labels from the features for training ftrs = rocks[:, :-1] lbls = rocks[:, -1] print('features matrix shape:', ftrs.shape) print('labels matrics shape:', lbls.shape) ``` Next we'll split the data up into training and testing datasets. We'll train only on the training split, and hold the testing split out blind. When the model is trained, we'll expose the test data to the network to see if it has really learned anything. If the network has learned (to generalize), it should be able to predict the rock type for samples of chemical compositions it's never seen before. 
``` train_percentage = 0.9 train_num = int(np.round(rocks.shape[0] * train_percentage)) train_dat = ftrs[:train_num, :] train_lbl = lbls[:train_num] test_dat = ftrs[train_num:, :] test_lbl = lbls[train_num:] print('train_dat shape:', train_dat.shape) print('train_lbl shape:', train_lbl.shape) print('test_dat shape:', test_dat.shape) print('test_lbl shape:', test_lbl.shape) ``` ### Build neural network This model is slightly different from the one in the regression task of notebook `2.0-Simple-Neural-Network.ipynb` for two reasons: 1. The input and output of this network has mulitiple features, defined by `input_size` and `num_classes`. This is because we have a multifeature input, or "feature vector." In the regression task, we input one value, and expected one output value. In this task, we input as many values as we have chemical composition values. 2. The output of the model is a 5 element vector which represents the probability of the input sample corresponding to each of the 5 "classes," or rock types. The `nn.Softmax` layer on the back of the model calculates these probabilities. Instead of "regressing" one input x value to one output y value, we "classify" one feature vector to one class. We've also included a set of commented out layers. By adding these layers back in at home, you'll add more depth to your network, and increase the accuracy of your predictions. The cost of calculating and backpropagating gradients increases dramatically, so don't expect to be able to train it in an hour on a single thread. 
class FirstNet(torch.nn.Module):
    """Single-hidden-layer rock-type classifier.

    Architecture: Linear(input_size -> 3000) -> ReLU ->
    Linear(3000 -> num_classes) -> Softmax over the class axis, so the
    output of `forward` is a (batch, num_classes) tensor of class
    probabilities. More depth can be added by inserting extra
    Linear/ReLU pairs between fc1 and fc4 (at a steep training cost).
    """

    def __init__(self, input_size, num_classes):
        super(FirstNet, self).__init__()
        # Attribute names (fc1/relu1/fc4/soft) are kept so previously
        # saved state_dict checkpoints remain loadable.
        self.fc1 = torch.nn.Linear(input_size, 3000)
        self.relu1 = torch.nn.ReLU()
        self.fc4 = torch.nn.Linear(3000, num_classes)
        self.soft = torch.nn.Softmax(dim=1)

    def forward(self, x):
        """Map a (batch, input_size) feature tensor to class probabilities."""
        hidden = self.relu1(self.fc1(x))
        return self.soft(self.fc4(hidden))
``` # set up the plot p1 = figure(y_range=rock_names, plot_width=900, plot_height=500, title="First 100 Rocks") p1.title.text_font_size = '24pt' p1.xaxis.axis_label = 'Sample Number' p1.yaxis.axis_label = 'Rock type' p1.xaxis.major_label_orientation = 1.57 # plot the rock type data r1 = p1.circle(range(100), train_lbl[:100] + 0.5, fill_alpha=0.6, line_alpha=0.6, legend='groundtruth') # plot the predictions from the network r2 = p1.circle(range(100), np.argmax(predictions.data.numpy(), axis=1) + 0.5, fill_alpha=0.2, line_alpha=0.2, fill_color='red', line_color='red', legend='prediction') # set up the legend p1.legend.location = "top_left" p1.legend.click_policy="hide" # show the plot inline show(p1, notebook_handle=True) ``` ### Train the Model Now let's train the model. Even the tiny neural network we defined above will take longer than we have time to train during this class, but let's kick it off, watch the loss, and see if it's learning anything: ``` %%time # here, again we type the data and labels as tensors train_dat = torch.FloatTensor(train_dat) train_lbl = torch.LongTensor(train_lbl) # define hyperparameters learning_rate = 0.001 num_epochs = 30 loss_hist = [] # build a multiclass cross entropy loss function criterion = torch.nn.CrossEntropyLoss() # instantiate a stochastic gradient descent optimizer class optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate) # set the model parameters for training mode net.train() # build a loss plot p2 = figure(plot_width=900, plot_height=500) r2 = p2.line(range(len(loss_hist)), loss_hist) p2.legend.location = "top_left" p2.legend.click_policy="hide" loss_plot = show(p2, notebook_handle=True) # send data to GPU, if appropriate if use_cuda: criterion = criterion.cuda() train_dat = train_dat.cuda() train_lbl = train_lbl.cuda() # train for many epochs for epoch in tnrange(num_epochs): # forward pass through the model predictions = net.forward(train_dat) # calculate local value on the loss surface loss = 
criterion(predictions, train_lbl) # clear the gradient buffer optimizer.zero_grad() # backward pass through the model to calculate gradients loss.backward() # take one step towards a minimum in the loss surface optimizer.step() # replot the network loss for one epoch loss_hist.append(loss.data.numpy()) r2 = p2.line(range(len(loss_hist)), loss_hist) push_notebook(handle=loss_plot) # set the model parameters for inference mode net.eval() ``` # Save Trained Model Always (always!) save your trained model weights. You'll thank yourself laters. ``` import datetime np.save('../dat/checkpoints/loss.npy', np.array(loss_hist), allow_pickle=False) checkpoint_name = ''.join(('../dat/checkpoints/', str(datetime.datetime.now()).replace(' ','_'), '.bin')) torch.save(net.cpu().state_dict(), checkpoint_name) ``` # Predict rock types on unseen data! #### Finally, we'll use our trained model to predict the rock type of data that the model has never seen and calculate its accuracy. First, we build the predictions: ``` %%time # here, again we type the data and labels as tensors if use_cuda: predictions = net.forward(torch.FloatTensor(test_dat).cuda()).cpu() else: predictions = net.forward(torch.FloatTensor(test_dat)) ``` Next, we type convert our predictions back to a standard numpy array and evaluate the maximum probability for each class, elementwise (samplewise). ``` predictions = np.argmax(predictions.data.numpy(), axis=1) ``` Now that we have our predictions, we can calculate the accuracy simply by differencing the groudtruth with the predictions. If the prediction is different than the groundtruth, we assign an "incorrect". If the prediction is the same as the groudtruth we assign a "corrrect". ``` accuracy = np.equal(predictions, test_lbl) accuracy = np.round(100 * np.sum(accuracy) / predictions.shape[0]) print('total accuracy:', accuracy, '%') ``` Now, let's analyze classwise (rock-type-wise) accuracy. 
We'll perform the same analysis as above, but we'll do it for each rock type to see if the model performs better on a certain rock type. ``` for i, one_rock_type in enumerate(rock_names): idx = np.where(test_lbl == i)[0] accuracy = predictions[idx] == i accuracy = np.round(100 * np.sum(accuracy) / idx.shape[0]) print('total accuracy on rock type {}:'.format(one_rock_type), accuracy, '%') # set up the plot p1 = figure(y_range=rock_names, plot_width=900, plot_height=500, title="Rock Type Predictions") p1.title.text_font_size = '24pt' p1.xaxis.axis_label = 'Sample Number' p1.yaxis.axis_label = 'Rock type' p1.xaxis.major_label_orientation = 1.57 # plot the rock type data r1 = p1.circle(range(test_lbl.shape[0]), test_lbl + 0.5, fill_alpha=0.6, line_alpha=0.6, legend='groundtruth') # plot the predictions from the network r2 = p1.circle(range(test_lbl.shape[0]), predictions + 0.5, fill_alpha=0.2, line_alpha=0.2, fill_color='red', line_color='red', legend='prediction') # set up the legend p1.legend.location = "top_left" p1.legend.click_policy="hide" # show the plot inline show(p1, notebook_handle=True) ```
github_jupyter
# Keys to Remember - Use COO summation convention to create confusion matrix - PageRank Algorithm for reference # Sparse Matrices ``` %matplotlib inline import numpy as np import pandas as pd from scipy import sparse import scipy.sparse.linalg as spla import matplotlib.pyplot as plt import seaborn as sns sns.set_context('notebook', font_scale=1.5) ``` ## Creating a sparse matrix There are many applications in which we deal with matrices that are mostly zeros. For example, a matrix representing social networks is very sparse - there are 7 billion people, but most people are only connected to a few hundred or thousand others directly. Storing such a social network as a sparse rather than dense matrix will offer orders of magnitude reductions in memory requirements and corresponding speed-ups in computation. ### Coordinate format The simplest sparse matrix format is built from the coordinates and values of the non-zero entries. #### From dense matrix ``` A = np.random.poisson(0.2, (5,15)) * np.random.randint(0, 10, (5, 15)) A rows, cols = np.nonzero(A) vals = A[rows, cols] vals rows cols X1 = sparse.coo_matrix(A) X1 print(X1) ``` #### From coordinates Note that the (values, (rows, cols)) argument is a single tuple. ``` X2 = sparse.coo_matrix((vals, (rows, cols))) X2 print(X2) ``` #### Convert back to dense matrix ``` X2.todense() ``` ## Compressed Sparse Row and Column formats When we have repeated entries in the rows or cols, we can remove the redundancy by indicating the location of the first occurrence of a value and its increment instead of the full coordinates. Note that the final index location must be the number of rows or cols since there is no other way to know the shape. These are known as CSR or CSC formats. 
``` np.vstack([rows, cols]) indptr = np.r_[np.searchsorted(rows, np.unique(rows)), len(rows)] indptr X3 = sparse.csr_matrix((vals, cols, indptr)) X3 print(X3) X3.todense() ``` #### Casting from COO format Because the coordinate format is more intuitive, it is often more convenient to first create a COO matrix then cast to CSR or CSC form. ``` X4 = X2.tocsr() X4 ``` ## COO summation convention When entries are repeated in a sparse matrix, they are **summed**. This provides a quick way to construct confusion matrices for evaluation of multi-class classification algorithms. ``` rows = np.repeat([0,1], 4) cols = np.repeat([0,1], 4) vals = np.arange(8) rows cols vals X5 = sparse.coo_matrix((vals, (rows, cols))) X5.todense() ``` #### Creating a 2 by 2 confusion matrix ``` obs = np.random.randint(0, 2, 100) pred = np.random.randint(0, 2, 100) vals = np.ones(100).astype('int') pred vals.shape, obs.shape , pred.shape X6 = sparse.coo_matrix((vals, (pred, obs))) X6.todense() ``` #### Creating an $n$ by $n$ confusion matrix For classifications with a large number of classes (e.g. image segmentation), the savings are even more dramatic. ``` from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier iris = datasets.load_iris() knn = KNeighborsClassifier() X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.5, random_state=42) pred = knn.fit(X_train, y_train).predict(X_test) pred y_test X7 = sparse.coo_matrix((np.ones(len(pred)).astype('int'), (pred, y_test))) pd.DataFrame(X7.todense(), index=iris.target_names, columns=iris.target_names) X7.todense() ``` ## Solving large sparse linear systems SciPy provides efficient routines for solving large sparse systems as for dense matrices. We will illustrate by calculating the page rank for airports using data from the [Bureau of Transportation Statisitcs](http://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236). 
The [PageRank](https://en.wikipedia.org/wiki/PageRank) algorithm is used to rank web pages for search results, but it can be used to rank any node in a directed graph (here we have airports instead of web pages). PageRank is fundamentally about finding the steady state in a Markov chain and can be solved as a linear system. The update at each time step for the page rank $PR$ of a page $p_i$ is ![i0](https://wikimedia.org/api/rest_v1/media/math/render/svg/8a8c0a807f62331cc1740dd6c0f28ac1809926c7) In the above equation, $B_u$ is the set of all nodes $v$ that link to $u$, where each $v$ node contributes its page rank divided by its number of outgoing links $L(v)$. So a node $v$ with a high page rank contributes a large value to a linked node $u$ if $v$ has relatively few other links. ![i0](figs/pagerank.png) The figure shows a network with four nodes, all of which start with a page rank of $1/4$. The values on the edges shows how much of its page rank one nodes contributes to its linked nodes in the first step. By letting the sum of all page ranks to be equal to one, we essentially have a probability distribution over the nodes of the graph. Since the state of the graph only depends on its previous state, we have a Markov chain. If we assume that every node can be reached from every other node, the system will have a steady state - which is what the PageRank algorithm seeks to find. To guard against case where a node has out-degree 0, we allow every node a small random chance of transitioning to any other node using a damping factor $d$. Then we solve the linear system to find the pagerank score $R$. 
![i1](https://wikimedia.org/api/rest_v1/media/math/render/svg/6bb0f1469218a064274fd4691143e9ce64639dc2) In matrix notation, this is ![i2](https://wikimedia.org/api/rest_v1/media/math/render/svg/96265e6c41318e793194287f36b5f929075bb876) where ![i2.5](https://wikimedia.org/api/rest_v1/media/math/render/svg/3e82b446a376633a386b10668703a4547f167d1c) At steady state, ![i3](https://wikimedia.org/api/rest_v1/media/math/render/svg/65d2fed50688deaca4640b117c88a9e7a3c2ef0d) and we can rearrange terms to solve for $R$ ![i4](https://wikimedia.org/api/rest_v1/media/math/render/svg/985f19f0c6b69d3a8afb5acc38339ebe4915baa7) ``` data = pd.read_csv('data/airports.csv', usecols=[0,1]) data.shape data.head() lookup = pd.read_csv('data/names.csv', index_col=0) lookup.shape lookup.head() import networkx as nx ``` #### Construct the sparse adjacency matrix ``` g = nx.from_pandas_edgelist(data, source='ORIGIN_AIRPORT_ID', target='DEST_AIRPORT_ID') airports = np.array(g.nodes()) adj_matrix = nx.to_scipy_sparse_matrix(g) ``` #### Construct the transition matrix ``` out_degrees = np.ravel(adj_matrix.sum(axis=1)) diag_matrix = sparse.diags(1 / out_degrees).tocsr() M = (diag_matrix @ adj_matrix).T n = len(airports) d = 0.85 I = sparse.eye(n, format='csc') A = I - d * M b = (1-d) / n * np.ones(n) # so the sum of all page ranks is 1 A.todense() from scipy.sparse.linalg import spsolve r = spsolve(A, b) r.sum() idx = np.argsort(r) top10 = idx[-10:][::-1] bot10 = idx[:10] df = lookup.loc[airports[top10]] df['degree'] = out_degrees[top10] df['pagerank']= r[top10] df df = lookup.loc[airports[bot10]] df['degree'] = out_degrees[bot10] df['pagerank']= r[bot10] df ``` #### Visualize the airport connections graph and label the top and bottom 5 airports by pagerank ``` labels = {airports[i]: lookup.loc[airports[i]].str.split(':').str[0].values[0] for i in np.r_[top10[:5], bot10[:5]]} nx.draw(g, pos=nx.spring_layout(g), labels=labels, node_color='blue', font_color='red', alpha=0.5, 
node_size=np.clip(5000*r, 1, 5000*r), width=0.1) ```
github_jupyter
# Taxonomic analysis Preambule to Multiple Sequence alignment and Phylogenetic tree building. ### Import necessary modules ``` from Bio import ( SeqIO as seqio, SearchIO as searchio, Entrez as entrez ) from Bio.Seq import Seq as seq import toml import pathlib import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from functools import partial, reduce from annotathon.utils.customobjs import ( Path as path, objdict as odict ) from typing import ( List, Dict, NoReturn, Any, Callable, Tuple, Optional, Union ) from annotathon.parsing.blast import parse_alignment_descriptions as parse_ncbi from annotathon.annotation.helper_functions import * ``` ### Configuration to access NCBI's servers : ``` # Load configuration to access NCBI : with open("../creds/entrezpy.toml", "r") as f: ncbi = toml.load(f, _dict=odict) # set credentials : entrez.api_key = ncbi.credentials.apikey entrez.email = ncbi.credentials.email entrez.tool = ncbi.credentials.tool ncbi.credentials.email # set plotting params : %matplotlib inline plt.style.use('seaborn') plt.rcParams['figure.figsize'] = (15, 8) with open("../config/locations.toml", "r") as f: _config = toml.load(f, _dict=odict) _config locations = odict({ key: path(value) for key, value in _config.locations.items() }) locations blast = odict({ "locations": locations, "data": odict({}) }) blast description_glob = "*Alignment-Descriptions*" ``` ### Load blast alignment descriptions ``` blast.data.update(odict({ "default": parse_ncbi(blast.locations.default.lglob(description_glob)[0]), "cinqk": parse_ncbi(blast.locations.cinqk.lglob(description_glob)[0]), "landmark": parse_ncbi(blast.locations.landmark.lglob(description_glob)[0]), "sp": parse_ncbi(blast.locations.sp.lglob(description_glob)[0]), "taxo": parse_ncbi(blast.locations.anthony.lglob(description_glob)[0]), "hypo": parse_ncbi(blast.locations.anthony.lglob("*.csv")[1]) })) ``` This is probably unnecessary given that we now have all the information from 
def download_and_cache_genbank(
    accessions: List[str],
    cache_file: Union[str, pathlib.Path],
    efetch_kw: Optional[Dict[str, str]] = None,
    overwrite: Optional[bool] = None,
    verbose: Optional[bool] = True
) -> bool:
    """
    Download records from NCBI (via Bio.Entrez.efetch) and cache them to a file.

    Intended for fetching the full records behind the accession numbers of a
    BLAST hit table ("Alignment-Descriptions.csv"), so richer multicriteria
    filtering and selection can be performed locally.

    When setting `rettype` in `efetch_kw`, make sure it is a valid Bio.SeqIO
    format. "gb" (genbank) is recommended as it preserves the most
    information; "fasta" keeps only the sequence and a short name. See:
    https://biopython.readthedocs.io/en/latest/api/Bio.SeqIO.html

    Parameters:
        accessions: list of accession identifier strings.
        cache_file: str or pathlib.Path; the file used to store the results.
        efetch_kw: keyword arguments forwarded to Bio.Entrez.efetch().
            Defaults to {"db": "protein", "rettype": "gb", "retmode": "text"}.
        overwrite: whether overwriting an existing cache file is allowed.
            Defaults to False. (The previous docstring claimed True, but the
            code has always defaulted to False -- docs fixed to match.)
        verbose: print status messages while executing. Defaults to True.

    Returns:
        True if `cache_file` exists and is non-empty afterwards, else False.

    Raises:
        FileExistsError: if `cache_file` exists and `overwrite` is falsy.
    """
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # kept as-is so existing callers see the same exception type.
    assert isinstance(accessions, list), f"param `accessions` has invalid type {type(accessions)}"
    assert isinstance(cache_file, (str, pathlib.Path)), f"param `cache_file` has invalid type {type(cache_file)}"

    efetch_kw = efetch_kw or {"db": "protein", "rettype": "gb", "retmode": "text"}
    overwrite = bool(overwrite)

    if isinstance(cache_file, str):
        cache_file = path(cache_file)

    if cache_file.exists() and not overwrite:
        raise FileExistsError(f"You are trying to overwrite {cache_file.absolute().as_posix()}")

    # Past this point either the file does not exist or overwriting is
    # allowed, so the original's second existence check was redundant.
    if verbose:
        print(f"Number of accesions to fetch : {len(accessions)}")
        print("Querying entrez and fetching results...")
        print("Be patient, this might take a while...")

    with entrez.efetch(id=accessions, **efetch_kw) as in_handle:
        # seqio.parse is lazy, so writing must happen while the network
        # handle is still open.
        sequences = seqio.parse(in_handle, format=efetch_kw["rettype"])
        if verbose:
            print(f"Finished quering for {len(accessions)} accession numbers")
        with open(cache_file, "w") as out_handle:
            if verbose:
                print(f"Writting entries to cache file {cache_file.absolute().as_posix()}")
            seqio.write(sequences, out_handle, format=efetch_kw["rettype"])

    # Success == cache file exists and is non-empty. `.stat().st_size`
    # replaces the original's opaque positional `stat()[6]` indexing, and
    # one boolean expression replaces the nested if/else return ladder.
    return cache_file.exists() and cache_file.stat().st_size > 0
""" assert isinstance(df, pd.DataFrame), f"param `df` has invalid type {type(df)}" assert isinstance(file, str) or isinstance(file, pathlib.Path), f"param `cache_file` has invalid type {type(file)}" assert "Accession" in df.columns, f"DataFrame `df` has no column `Accession` which is needed for this function" if isinstance(file, str): file = path(file) if not file.exists(): raise FileNotFoundError(f"{file.absolute().as_posix()}") # Begin processing _df = df.copy() # Read taxonomy info with open(file, "r") as f: seq_dict = { sq.id: sq for sq in seqio.parse(f, format=fformat) } # Create accession lookup, taxonomy-yeilding function _tax_by_id = lambda x: ":".join(seq_dict[x].annotations["taxonomy"]) if x in seq_dict.keys() else "" # Add the taxonomy column _df.loc[:, "taxonomy"] = "" _df.loc[:, "taxonomy"] = _df.Accession.apply(_tax_by_id) # Count the number of entries on each one n_newcols = _df.taxonomy.apply(lambda x: np.nan if not x else x).dropna().shape[0] n_oldcols = df.shape[0] # Let the user now not all records got taxonomy info if (n_newcols != n_oldcols) and verbose: print("Some entries have no taxonomy info!") print(f"Original number of entries = {n_oldcols}") print(f"Entries with non-null taxonomy info = {n_newcols}") return _df blast.data.cinqk.shape[0] add_taxonomy(df=blast.data.cinqk, file="5k-info.gb", fformat="gb") bar = add_taxonomy(blast.data.cinqk, file="5k-info.gb", fformat="gb") "Accession" in bar.columns foo = add_taxonomy(tests, file="mahouille.gb", fformat="gb") foo pathlib.Path("foo.txt").absolute().as_posix() with open("5k-info.gb", "r") as f: sequences = list(seqio.parse(f, format="gb")) with open("5k-info.gb", "r") as f: seq_dict = { sq.id: sq for sq in seqio.parse(f, format="gb") } seq_dict blast.data.cinqk.loc[:, "taxc"] = blast.data.cinqk.Accession.apply(taxonomy_by_id).apply(lambda x: np.nan if not x else x).dropna() x = blast.data.cinqk x.head() x.loc[:, "taxonomy"] = "" x for i in range(len(sequences)): x.loc[i, "taxonomy"] = 
":".join(sequences[i].annotations["taxonomy"]) x.shape x.head() ``` ### Betaproteobacteries ``` x["Per. ident"].hist() par_identite = x[ x["Per. ident"] > 29 ].copy() par_identite.shape par_eval = par_identite[ par_identite["E value"] > 1e-39 ].copy() par_eval.shape id_nuestras_betas = par_eval.taxonomy.apply(lambda x: x if "Beta" in x else np.nan).dropna().index nuestras_betas = par_eval.loc[id_nuestras_betas, :] nuestras_betas.shape nuestras_betas[["E value", "Per. ident"]].hist() ( nuestras_betas.sort_values(by=["Per. ident"], ascending=False) .sort_values(by=["E value"], ascending=True) .Accession .to_list()[7] ) ( nuestras_betas.sort_values(by=["Per. ident"], ascending=False) .sort_values(by=["E value"], ascending=True) .head(10) ) nuestras_betas.sort_values(by=["E value"], ascending=True).sort_values(by=["Per. ident"], ascending=False).head() x.loc[100, :] ``` ### Firmicutes ``` firm_e_value = x[ x["E value"] > 1e-38 ].copy() firm_e_value.shape firm_id = firm_e_value[ firm_e_value["Per. ident"] > 29 ].copy() firm_id.shape id_nuestras_firmis = firm_id.taxonomy.apply(lambda x: x if "Firm" in x else np.nan).dropna().index id_nuestras_firmis.shape nos_firmis = firm_id.loc[id_nuestras_firmis, :] nos_firmis.shape nos_firmis[["E value", "Per. ident"]].hist() ( nos_firmis[ nos_firmis["E value"] > 9e-38] .sort_values(by=["E value"], ascending=True) .sort_values(by=["Per. ident"], ascending=False) .head(20) ) ( nos_firmis .sort_values(by=["E value"], ascending=True) .sort_values(by=["Per. 
ident"], ascending=False) .head(5) ) firm_acc = [ "WP_093335167.1", "WP_124220101.1", "ONI42253.1", "NMA33736.1", "WP_009488372.1", "KPN82060.1", ] with entrez.efetch( db="protein", id=firm_acc, rettype="fasta", retmode="text" ) as in_handle: firm_fasta = list(seqio.parse(in_handle, format="fasta")) firm_fasta ``` ### Nos alpha ``` id_alphas = x.taxonomy.apply(lambda x: x if "Alpha" in x else np.nan).dropna().index id_alphas.shape nos_alphas = x.loc[id_alphas, :] nos_alphas.shape aber = ( nos_alphas .sort_values(["Per. ident"], ascending=False) .sort_values(["E value"], ascending=True) ) aber.head() aber.loc[265, "taxonomy"] matchs = [ sequences[i].id == x.Accession[i] for i in range(len(sequences)) ] reduce(lambda x, y: x and y, matchs) ### Whoops : it seems that numerical iteration was indeed a bad idea ! reduce(lambda x, y: x and y, [True, False]) sequences[3000].id == x.Accession[3000] ":".join(sequences[-500].annotations["taxonomy"]) ":".join([]) # some dummy test : with entrez.efetch(db="nucleotide", id="EU490707", rettype="gb", retmode="text") as wow: print(wow.read()) help(entrez.efetch) with open(blast.locations.cinqk.lglob(description_glob)[0], "r") as f: x = f.readline() x ```
github_jupyter
```
# Exploratory analysis of exam (EGE-style) scores read from a local Excel file.
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
df=pd.read_excel("Obsh.xlsx")
df
# The last row is questionable; it makes sense to drop it
df=df[:-1]
# Column 'Балл' = score; 'Минимальный балл' = pass threshold; 'Пол' = sex.
df['Балл'].describe()
df['Балл'].mean()
df['Балл'].count()
# N4
(df['Балл']<df['Балл'].mean()).sum()/df['Балл'].count()*100 # percentage of scores below the mean
(df['Балл']>=df['Минимальный балл']).sum()
# Pie chart: share of students who passed the minimum threshold
fig, ax = plt.subplots(1)
(df['Балл']>=df['Минимальный балл']).value_counts().plot(kind='pie', labels=['', ''], ax=ax, autopct='%.2f%%', fontsize=15)
ax.set_title(u'Доля прошедших мин. порог')
ax.axis('equal')
ax.legend(labels=[u'Прошли', u'Провалили'], framealpha=0)
ax.set_ylabel('')
plt.tight_layout()
(df['Пол']=="Ж").sum()
# Pie chart: sex distribution (title is reused from the previous plot)
fig, ax = plt.subplots(1)
(df['Пол']=="Ж").value_counts().plot(kind='pie', labels=['', ''], ax=ax, autopct='%.2f%%', fontsize=15)
ax.set_title(u'Доля прошедших мин. порог')
ax.axis('equal')
ax.legend(labels=[u'Девушки', u'Юноши'], framealpha=0)
ax.set_ylabel('')
plt.tight_layout()
# NOTE(review): `normed` was removed from Matplotlib's hist — needs `density=True`
# on current versions; left unchanged here.
df['Балл'].hist(bins=50,normed=1)
# NOTE(review): scipy.stats.kde is a deprecated import path (use scipy.stats).
from scipy.stats.kde import gaussian_kde
from numpy import linspace,hstack
from pylab import plot,show,hist
my_density = gaussian_kde(df['Балл'], bw_method = 0.1)
# plot
x = linspace(min(df['Балл']), max(df['Балл']),1000)
plot(x, my_density(x),'r') # distribution function
df.columns
df2=df['Балл']
df2
# Grade buckets: excellent >= 85, good 70-84, satisfactory 50-69, fail < 50.
otlichno=df["Балл"].loc[df['Балл']>=85]
horosho=df["Балл"].loc[(df['Балл']<85) & (df['Балл']>=70)]
udov=df["Балл"].loc[(df['Балл']<70) & (df['Балл']>=50)]
neudov=df["Балл"].loc[df['Балл']<50]
ocenki=[(otlichno),(horosho),(udov),(neudov)]
ocenki1=[len(otlichno),len(horosho),len(udov),len(neudov)]
print(ocenki1)
plt.pie(ocenki1,autopct='%.2f%%');
plt.axis('equal');
import seaborn as sns
# Stray cell values — presumably exam-site codes of interest; confirm intent.
42
70
87
# 'Код ППЭ' = exam site (venue) code.
df['Код ППЭ'].nunique()
df['Код ППЭ'].unique()
df.groupby('Код ППЭ')['Код ППЭ']
# Mean score per exam site
m=df.groupby('Код ППЭ')['Балл'].mean()
m
m.keys()
k=list(map(str,m.keys()))
k
plt.figure(figsize=(20,8))
plt.bar(k, m.values)
# Number of students per exam site
m=df.groupby('Код ППЭ')['Балл'].count()
m
k=list(map(str,m.keys()))
k
plt.figure(figsize=(20,8))
plt.bar(k, m.values)
# 'Задания с кратким ответом' = short-answer tasks (one char per task, '-' = not solved);
# 'Задания с развёрнутым ответом' = long-answer tasks.
len(df['Задания с кратким ответом'][0])
len(df['Задания с развёрнутым ответом'][0])/4
l=list(df['Задания с кратким ответом'])
l
# Two exam sites to compare head-to-head.
n1=130.0
n2=152.0
df.where(df['Код ППЭ']==n2)['Задания с кратким ответом'].count()
l1=list(df[df['Код ППЭ']==n1]['Задания с кратким ответом'])
l1
l2=list(df[df['Код ППЭ']==n2]['Задания с кратким ответом'])
l2
#split(l)
# Map each task character to solved (1) / not solved (0).
change=lambda x: 0 if ((x=='-')|(x==0)) else 1
r=list(map(change,l[1]))
r
len(l1)
# NOTE(review): this lambda shadows the stdlib `re` name and is never used.
re=lambda x: x/len(l1)
# Per-task solve fraction for site n1
res=[]
v=[0]*len(l1[0])
for i in l1:
    k=list(map(change,i))
    for j in range(len(k)):
        v[j]+=int(k[j])
res1=list(map(lambda x: x/len(l1),v))
# Per-task solve fraction for site n2
res=[]
v=[0]*len(l2[0])
for i in l2:
    k=list(map(change,i))
    for j in range(len(k)):
        v[j]+=int(k[j])
res2=list(map(lambda x: x/len(l2),v))
res2
res1
# Grouped bar chart: per-task completion rates for the two sites
ind= np.arange(len(res1))
width = 0.35
ind
fig, ax = plt.subplots()
rects1 = ax.bar(ind - width/2 , res1, width, label=n1)
rects2 = ax.bar(ind + width/2 , res2, width, label=n2)
ax.set_ylabel('Процент')
ax.set_title('Процент выполненых заданий')
ax.set_xticks(ind)
#ax.set_xticklabels((n1, n2))
ax.legend()
fig.tight_layout()
plt.show()
[0]*10
# Mean score per site
n11=df.where(df['Код ППЭ']==n1)['Балл'].mean()
n11
df.where(df['Код ППЭ']==n1)['Балл'].mean()
n21=df.where(df['Код ППЭ']==n2)['Балл'].mean()
n21
m1=list([str(n1),str(n2)])
m2=list([n11,n21])
#plt.xticks(m1, objects)
plt.ylabel('Средний балл')
plt.title('Средний балл по двум школам')
plt.grid()
plt.bar(m1, m2, align='center', alpha=0.5,width = 0.5)
# Mean score per site, split by sex ('Ж' = female, 'М' = male).
# NOTE(review): m1/m2 are rebound here, clobbering the bar-chart labels above.
d1=df.where((df['Код ППЭ']==n1) & (df['Пол']=='Ж'))['Балл'].mean()
d2=df.where((df['Код ППЭ']==n2) & (df['Пол']=='Ж'))['Балл'].mean()
m1=df.where((df['Код ППЭ']==n1) & (df['Пол']=='М'))['Балл'].mean()
m2=df.where((df['Код ППЭ']==n2) & (df['Пол']=='М'))['Балл'].mean()
datad = (d1, d2)
datad
datam = (m1, m2)
datam
# Grouped bar chart: mean score by sex and site
ind= np.arange(len(datam))
width = 0.35
ind
fig, ax = plt.subplots()
rects1 = ax.bar(ind - width/2 , datam, width, label='Men')
rects2 = ax.bar(ind + width/2 , datad, width, label='Women')
ax.set_ylabel('Средний балл')
ax.set_title('Средний балл по полам и школам')
ax.set_xticks(ind)
ax.set_xticklabels((n1, n2))
ax.legend()
fig.tight_layout()
plt.show()
```
github_jupyter
# Load Libraries & Datasets
```
# Kaggle "Ventilator Pressure Prediction": feature engineering + a residual
# bidirectional-LSTM regressor trained with K-fold CV and a masked MAE loss.
import os, io, gc
import numpy as np
import pandas as pd
import random
from scipy.fft import fft
from scipy.signal import hilbert, blackman
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import KFold
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Dropout, Activation
from tensorflow.keras.layers import Add, concatenate
from tensorflow.keras.layers import Bidirectional, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
pd.set_option('display.max_columns',None)
import warnings
warnings.filterwarnings('ignore')
gc.enable()
SEED = 42
def seed_everything(seed=SEED):
    """Seed Python, NumPy and TensorFlow RNGs for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)
    tf.random.set_seed(seed)
seed_everything()
train_df = pd.read_csv('../input/ventilator-pressure-prediction/train.csv')
test_df = pd.read_csv('../input/ventilator-pressure-prediction/test.csv')
submission_df = pd.read_csv('../input/ventilator-pressure-prediction/sample_submission.csv')
# The target takes discrete values: record its grid (min/max/step) so test
# predictions can later be snapped back onto it.
pressure_unique = np.sort(train_df['pressure'].unique())
len_pressure = len(pressure_unique)
PRESSURE_MIN = pressure_unique[0].item()
PRESSURE_MAX = pressure_unique[-1].item()
PRESSURE_STEP = (pressure_unique[1] - pressure_unique[0]).item()
```
# Data Processing
```
def feature_processing(df, backward_sequence=False):
    """
    Engineer per-breath features in place on `df` and return it.

    Assumes each `breath_id` group is 80 consecutive rows with columns
    `time_step`, `u_in`, `u_out`, `R`, `C` (Kaggle competition layout) —
    confirm before reuse elsewhere.

    If `backward_sequence` is True, rows within each breath are reversed first.
    Triple-quoted string blocks below are intentionally disabled feature groups.
    """
    feature_list_1 = ['time_step', 'u_in', 'u_out']
    feature_list_2 = ['u_in', 'u_out']
    ###########################################################################
    # backward sequence
    if backward_sequence:
        df['rank'] = df.groupby(['breath_id'])['time_step'].rank()
        df['neg_rank'] = -1 * df['rank']
        df = df.sort_values(by=['breath_id','neg_rank']).reset_index(drop=True)
        del df['rank'],df['neg_rank']
        gc.collect()
    ###########################################################################
    # cumulative sum / running mean of u_in within each breath
    df['one'] = 1
    df['count'] = (df['one']).groupby(df['breath_id']).cumsum()
    df['u_in_cumsum'] = df.groupby(['breath_id'])['u_in'].cumsum()
    df['u_in_cummean'] = df['u_in_cumsum'] / df['count']
    del df['one'], df['count']
    gc.collect()
    ###########################################################################
    # cross, delta & area
    df['log_u_in'] = np.log1p(df['u_in'])
    df["cross_u_in"] = df["u_in"] * (1 - df["u_out"])
    df["cross_time_step"] = df["time_step"] * (1 - df["u_out"])
    df['cross_1']= df['u_in'] * df['u_out']
    df['cross_2']= df['time_step'] * df['u_out']
    df["area_frac"] = df["u_in"] * df["time_step"]
    df["cross_area_frac"] = df["area_frac"] * (1 - df["u_out"])
    df["area_frac_cumsum"] = df.groupby(['breath_id'])["area_frac"].cumsum()
    df['time_gap'] = df['time_step'] - df.shift(1).fillna(0)['time_step']
    df['u_in_gap'] = df['u_in'] - df.shift(1).fillna(0)['u_in']
    df['u_in_rate'] = df['u_in_gap'] / df['time_gap']
    # Zero the first row of every 80-row breath (shift crossed breath borders).
    df.loc[list(range(0, len(df), 80)), 'time_gap'] = 0
    df.loc[list(range(0, len(df), 80)), 'u_in_gap'] = 0
    df.loc[list(range(0, len(df), 80)), 'u_in_rate'] = 0
    df['area_1'] = df['u_in'] * df['time_step']
    df['area_2'] = df['u_in'] * df['time_gap']
    df['area_timestep_cumsum'] = df.groupby(['breath_id'])['area_1'].cumsum()
    df['air_flow_rate'] = df['u_out'] - (df['u_in']/100)
    df['air_flow_area'] = df['air_flow_rate'] * df['time_step']
    df['time_step_diff_1'] = df.groupby(['breath_id'])['time_step'].diff(1).fillna(0)
    df['time_step_diff_1_r'] = df.groupby(['breath_id'])['time_step'].diff(-1).fillna(0)
    df['delta_1'] = df['time_step_diff_1'] * df['u_in']
    df['delta_2'] = df['time_step_diff_1_r'] * df['u_in']
    # NOTE(review): area_1/area_2 are overwritten here with cumsums of the deltas.
    df['area_1'] = df.groupby(['breath_id'])['delta_1'].cumsum()
    df['area_2'] = df.groupby(['breath_id'])['delta_2'].cumsum()
    df['area_delta_cumsum'] = df.groupby(['breath_id'])['area_1'].cumsum()
    df['max_to_cumsum_u_in_per_breath_id'] = df.groupby(['breath_id'])['u_in'].transform('max') - df['u_in_cumsum']
    ###########################################################################
    # vf: approximation for rate of change in volume at a particular time stamp
    # vt: approximation for total lungs volume at a particular time stamp
    # source: https://www.kaggle.com/c/ventilator-pressure-prediction/discussion/281299
    df['vt'] = 0
    df['exponent'] = (-df['time_step']) / (df['R'] * df['C'])
    df['factor'] = np.exp(df['exponent'])
    df['v1'] = (df['u_in'] * df['R']) / df['factor']
    df['vf'] = (df['u_in_cumsum'] * df['R']) / df['factor']
    df.loc[df['time_step'] != 0, 'vt'] = df['area_timestep_cumsum']/(df['C'] * (1 - df['factor']))
    df['v'] = df['vf'] + df['vt']
    ###########################################################################
    # lags, difference, and rolling
    lags = 3
    for lag in range(1, lags+1):
        for feature in feature_list_1:
            ## lag
            df[f'{feature}_lag_{lag}'] = df.groupby(['breath_id'])[feature].shift(lag).fillna(0)
            ## inverse lag
            df[f'{feature}_lag_inverse_{lag}'] = df.groupby(['breath_id'])[feature].shift(-lag).fillna(0)
            ## diff lag
            # df[f'{feature}_lag_diff_{lag}'] = df[feature] - df[f'{feature}_lag_{lag}']
            # df[f'{feature}_lag_diff_{lag}'] = df[f'{feature}_lag_diff_{lag}'].fillna(0)
            ## diff inverse lag
            # df[f'{feature}_lag_inverse_diff_{lag}'] = df[feature] - df[f'{feature}_lag_inverse_{lag}']
    diff = 3
    for diff in range(1, diff+1):
        df[f'u_in_diff_{diff}'] = df.groupby(['breath_id'])['u_in'].diff(diff).fillna(0)
    # df['u_in_diff_1'] = df.groupby(['breath_id'])['u_in'].diff(1).fillna(0)
    """
    lags = 3
    for lag in range(1, lags+1):
        for feature in feature_list_2:
            # breath_id lag
            df[f'breath_id_lag_{lag}'] = df['breath_id'].shift(lag).fillna(0)
            # breath_id same lag
            df[f'breath_id_lag_{lag}_same'] = np.select([df[f'breath_id_lag_{lag}'] == df['breath_id']],[1],0)
            # breath_id and feature_list_2
            df[f'breath_id_{feature}_lag_{lag}'] = df[feature].shift(lag).fillna(0)
            df[f'breath_id_{feature}_lag_{lag}'] = df[f'breath_id_{feature}_lag_{lag}'] * df[f'breath_id_lag_{lag}_same']
            del df[f'breath_id_lag_{lag}_same'], df[f'breath_id_{feature}_lag_{lag}']
    """
    df['mean_u_out_per_breath_id'] = df.groupby(['breath_id'])['u_out'].transform('mean')
    df['breath_id_u_in_max'] = df.groupby(['breath_id'])['u_in'].transform('max')
    # NOTE(review): the next assignment immediately overwrites the max-based
    # column with a mean-based one — likely intended as two distinct features.
    df['breath_id_u_in_diff_max'] = df.groupby(['breath_id'])['u_in'].transform('max') - df['u_in']
    df['breath_id_u_in_diff_max'] = df.groupby(['breath_id'])['u_in'].transform('mean') - df['u_in']
    """
    windows = [8, 16, 32]
    for feature in feature_list_:
        for window in windows:
            df[f'{feature}_rolling_mean_{window}'] = df.groupby('breath_id')[feature].rolling(window).mean().reset_index(drop=True)
            df[f'{feature}_rolling_min_{window}'] = df.groupby('breath_id')[feature].rolling(window).min().reset_index(drop=True)
            df[f'{feature}_rolling_max_{window}'] = df.groupby('breath_id')[feature].rolling(window).max().reset_index(drop=True)
            df[f'{feature}_rolling_std_{window}'] = df.groupby('breath_id')[feature].rolling(window).std().reset_index(drop=True)
            df[f'{feature}_rolling_std_{window}'] = df.groupby('breath_id')[feature].rolling(window).sum().reset_index(drop=True)
    """
    ###########################################################################
    # Features based on aggregations over R, C, rank and rounded u_in value (f1 - f6)
    # Source: https://www.kaggle.com/l0glikelihood/0-1093-single-public-lb
    df['sum_per_breath'] = df.groupby(['breath_id'])['u_in'].transform('sum')
    df['rounded_u_in'] = df['u_in'].round(0)
    df['rank'] = df.groupby(['breath_id'])['time_step'].rank()
    df['uid'] = df['R'].astype(str)+'_' + df['C'].astype(str) + '_' + df['rounded_u_in'].astype(str) + '_' + df['rank'].astype(str)
    # max, min, mean, count values of u_in for each uid
    df['uid_count'] = df.groupby(['uid'])['uid'].transform('count')
    df['f1'] = df.groupby(['uid'])['u_in'].transform('mean')
    df['f2'] = df.groupby(['uid'])['u_in'].transform('min')
    df['f3'] = df.groupby(['uid'])['u_in'].transform('max')
    # difference between the current value of u_in and its mean, min and max values within the uid
    df['f4'] = df['u_in'] - df.groupby(['uid'])['u_in'].transform('mean')
    df['f5'] = df['u_in'] - df.groupby(['uid'])['u_in'].transform('min')
    df['f6'] = df['u_in'] - df.groupby(['uid'])['u_in'].transform('max')
    del df['rounded_u_in'],df['rank'],df['uid']
    ###########################################################################
    """
    # spectral features
    # source: https://www.kaggle.com/lucasmorin/spectral-analysis-feature-engineering
    ffta = lambda x: np.abs(fft(np.append(x.values,x.values[0]))[:80])
    ffta.__name__ = 'ffta'
    fftw = lambda x: np.abs(fft(np.append(x.values,x.values[0])*w)[:80])
    fftw.__name__ = 'fftw'
    N = 80
    w = blackman(N+1)
    df['fft_u_in'] = df.groupby('breath_id')['u_in'].transform(ffta)
    df['fft_u_in_w'] = df.groupby('breath_id')['u_in'].transform(fftw)
    df['analytical'] = df.groupby('breath_id')['u_in'].transform(hilbert)
    df['envelope'] = np.abs(df['analytical'])
    df['phase'] = np.angle(df['analytical'])
    df['unwrapped_phase'] = df.groupby('breath_id')['phase'].transform(np.unwrap)
    df['phase_shift1'] = df.groupby('breath_id')['unwrapped_phase'].shift(1).astype(np.float32)
    df['IF'] = df['unwrapped_phase'] - df['phase_shift1'].astype(np.float32)
    df = df.fillna(0)
    del df['analytical']
    """
    ###########################################################################
    # R and C features
    df['R_u_in'] = df['u_in'] * df['R']
    df['C_u_in'] = df['u_in'] * df['C']
    # mean u_in per R, C, and u_out
    df['mean_u_in_per_R_C_u_out'] = df.groupby(['R','C','u_out'])['u_in'].transform('mean')
    df['diff_mean_u_in_per_R_C_u_out'] = df['u_in'] - df['mean_u_in_per_R_C_u_out']
    df['to_mean_u_in_per_R_C_u_out'] = df.groupby(['breath_id'])['u_in'].transform('mean') - df['mean_u_in_per_R_C_u_out']
    # max value of u_in grouped by R, C, and u_out
    df['max_u_in_per_R_C_u_out'] = df.groupby(['R','C','u_out'])['u_in'].transform('max')
    df['diff_max_u_in_per_R_C_u_out'] = df['u_in'] - df['max_u_in_per_R_C_u_out']
    df['to_max_u_in_per_R_C_u_out'] = df.groupby(['breath_id'])['u_in'].transform('max') - df['max_u_in_per_R_C_u_out']
    # OHE
    df['R'] = df['R'].astype(str)
    df['C'] = df['C'].astype(str)
    df['R_C'] = df['R'].astype(str) + '_' + df['C'].astype(str)
    df = pd.get_dummies(df)
    ###########################################################################
    return df
# Engineer features on train+test together so one-hot columns stay aligned.
df = pd.concat([train_df,test_df],axis=0,copy=False).reset_index(drop=True)
df = feature_processing(df)
train = df.iloc[:len(train_df),:]
test = df.iloc[len(train_df):,:].reset_index(drop=True)
del df, train_df, test_df
del test['pressure']
gc.collect()
train_df = train.copy()
# Reshape per-breath: 80 timesteps per sample.
targets = train[['pressure']].to_numpy().reshape(-1, 80)
train.drop(['pressure','id', 'breath_id'], axis=1, inplace=True)
u_outs = train[['u_out']].to_numpy().reshape(-1, 80)
test = test.drop(['id', 'breath_id'], axis=1)
# Scaler
RS = RobustScaler(quantile_range=(20.0, 80.0))
# NOTE(review): this fit on inspiratory-only rows is immediately discarded by
# the fit_transform on the full training set below — confirm which was intended.
RS.fit(train[train['u_out']==0])
train = RS.fit_transform(train)
test = RS.transform(test)
train = train.reshape(-1, 80, train.shape[-1])
test = test.reshape(-1, 80, train.shape[-1])
print(f"train: {train.shape} \ntest: {test.shape} \ntargets: {targets.shape} \nu_outs: {u_outs.shape}")
gc.collect()
```
# ResBiLSTM Model
```
# Accelerator Configuration
# NOTE(review): if no TPU is found, only `strategy` is defined, but the
# training block below uses `tpu_strategy.scope()` — it would NameError on GPU/CPU.
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
    BATCH_SIZE = tpu_strategy.num_replicas_in_sync * 64
    print("Running on TPU:", tpu.master())
    print(f"Batch Size: {BATCH_SIZE}")
except ValueError:
    strategy = tf.distribute.get_strategy()
    BATCH_SIZE = 512
    print(f"Running on {strategy.num_replicas_in_sync} replicas")
    print(f"Batch Size: {BATCH_SIZE}")
# Model Configuration
class CFG:
    # Hyperparameters and training knobs used across the notebook.
    seed = 42
    VERBOSE = 1
    random_state = 42
    N_FOLDS = 5
    EPOCHS = 200
    BATCH_SIZE = BATCH_SIZE
    factor = 0.5          # LR reduction factor for ReduceLROnPlateau
    patience_1 = 5        # ReduceLROnPlateau patience
    patience_2 = 15       # EarlyStopping patience
    learning_rate = 1e-3
    weight_decay = 1e-3
    dropout_rate = 0.2
# Custom MAE Loss
def custom_mae_loss(y_true, y_pred, n=80):
    """MAE over the inspiratory phase only: y_true packs [pressure(80), u_out(80)]
    per sample; expiratory timesteps (u_out == 1) are masked out."""
    u_out = y_true[:, n: ]
    y = y_true[:, :n ]
    w = 1 - u_out
    mae = w * tf.abs(y - y_pred)
    return tf.reduce_sum(mae, axis=-1) / tf.reduce_sum(w, axis=-1)
def dnn_model():
    """Build the residual (densely-skip-connected) BiLSTM regressor."""
    x_input = Input(shape=(train.shape[-2:]))
    x1 = Bidirectional(LSTM(units=1024, return_sequences=True))(x_input)
    c1 = concatenate([x_input, x1])
    x2 = Bidirectional(LSTM(units=512, return_sequences=True))(c1)
    c2 = concatenate([x1, x2])
    x3 = Bidirectional(LSTM(units=256, return_sequences=True))(c2)
    c3 = concatenate([x2, x3])
    x4 = Bidirectional(LSTM(units=128, return_sequences=True))(c3)
    c4 = concatenate([x3, x4])
    x5 = Dense(units=128, activation='selu')(c4)
    x_output = Dense(units=1)(x5)
    model = Model(inputs=x_input, outputs=x_output, name='DNN_Model')
    model.compile(optimizer='Adam', loss=custom_mae_loss)
    return model
model = dnn_model()
model.summary()
plot_model(model, to_file='dnn_model.png', show_shapes=True, show_layer_names=True)
# K-fold cross-validated training; out-of-fold and test predictions collected.
test_preds = []
history_list = []
oof_true = []
oof_pred = []
with tpu_strategy.scope():
    kf = KFold(n_splits=CFG.N_FOLDS, shuffle=True, random_state=42)
    oof_preds = np.zeros((train.shape[0],train.shape[1]))
    for fold, (train_idx, test_idx) in enumerate(kf.split(train, targets)):
        print('='*25, '>', f'Fold {fold+1}', '<', '='*25)
        checkpoint_filepath = f'fold{fold+1}.hdf5'
        X_train, X_valid = train[train_idx], train[test_idx]
        y_train, y_valid = targets[train_idx], targets[test_idx]
        u_out_train, u_out_valid = u_outs[train_idx], u_outs[test_idx]
        lr = ReduceLROnPlateau(monitor="val_loss", factor=CFG.factor, patience=CFG.patience_1, verbose=CFG.VERBOSE)
        es = EarlyStopping(monitor="val_loss", patience=CFG.patience_2, mode="min", restore_best_weights=True, verbose=CFG.VERBOSE)
        sv = ModelCheckpoint(checkpoint_filepath, monitor = 'val_loss', verbose = CFG.VERBOSE,
                             save_best_only = True, save_weights_only = True, mode = 'min')
        model = dnn_model()
        # Targets and u_out mask are concatenated along axis 1 for custom_mae_loss.
        history = model.fit(X_train, np.append(y_train, u_out_train, axis =1),
                            validation_data=(X_valid, np.append(y_valid, u_out_valid, axis =1)),
                            epochs=CFG.EPOCHS, batch_size=CFG.BATCH_SIZE, callbacks=[lr,es,sv])
        history_list += [history]
        # predict oof
        y_pred = model.predict(X_valid)
        y_true = y_valid.squeeze().reshape(-1, 1)
        ## inspiratory and expiratory phases
        score = mean_absolute_error(y_true, y_pred.squeeze().reshape(-1, 1))
        print(f'Fold {fold+1} | Overall MAE Score: {score}')
        ## inspiratory phase
        oof_true.append(y_true)
        oof_pred.append(y_pred.squeeze().reshape(-1, 1))
        oof_preds[test_idx] = y_pred.reshape(y_pred.shape[0],y_pred.shape[1])
        reshaped_targets = targets.squeeze().reshape(-1,1).squeeze()
        score = mean_absolute_error(reshaped_targets,oof_preds.squeeze().reshape(-1,1).squeeze())
        print(f'Fold {fold+1} | Inspiratory MAE Score: {score}')
        # predict test
        test_preds.append(model.predict(test).squeeze().reshape(-1, 1).squeeze())
        del X_train, X_valid, y_train, y_valid
        gc.collect()
np.save('test_preds.npy', test_preds)
oof_preds = oof_preds.squeeze().reshape(-1,1).squeeze()
reshaped_targets = targets.squeeze().reshape(-1,1).squeeze()
score = mean_absolute_error(reshaped_targets, oof_preds)
print(f'Overall OOF MAE Score: {score}')
idx = train_df[train_df['u_out']==0].index
train_df['prediction'] = oof_preds
score = mean_absolute_error(train_df.loc[idx,'pressure'],train_df.loc[idx,'prediction'])
print(f'Training Inspiratory MAE Score: {score}')
idx = train_df[train_df['u_out']==1].index
train_df['prediction'] = oof_preds
score = mean_absolute_error(train_df.loc[idx,'pressure'],train_df.loc[idx,'prediction'])
print(f'Training Expiratory MAE Score: {score}')
# Per-fold validation MAE from the stored per-fold arrays.
t = 0
for k in range(CFG.N_FOLDS):
    mae = np.mean(np.abs(oof_pred[k] - oof_true[k]))
    t += mae
    print(f'Fold {k+1} | MAE Validation Score: {mae}')
print(f'Overall CV MAE: {t/CFG.N_FOLDS}')
t = 0
for k in range(CFG.N_FOLDS):
    # NOTE(review): oof_preds is now a flat array, so oof_preds[k] is the k-th
    # prediction value, not fold k — `oof_pred[k]` was probably intended.
    mae = np.mean(np.abs(oof_preds[k] - oof_true[k]))
    t += mae
    print(f'Fold {k+1} | Inspiratory MAE Score: {mae}')
print(f'Overall Inspiratory MAE Score: {t/CFG.N_FOLDS}')
def plot_hist(hist, with_grid=True):
    """Overlay training (grey) and validation (green) loss curves per fold."""
    plt.figure(figsize=(20,5))
    for i in range(len(hist)):
        plt.plot(hist[i].history["loss"], color='grey')
        plt.plot(hist[i].history["val_loss"], color='green')
    plt.title("")
    plt.ylabel("Mean Absolute Error")
    plt.xlabel("epoch")
    plt.legend(["Training", "Validation"], loc="upper right")
    if with_grid:
        plt.grid(which='major', axis='both')
    plt.show()
plot_hist(history_list)
```
# Post-Processing
```
# Ensemble Folds with Mean
submission_df['pressure'] = np.mean(np.vstack(test_preds),axis=0)
submission_df.to_csv('submission_mean.csv', index=False)
# Ensemble Folds with Median
submission_df['pressure'] = np.median(np.vstack(test_preds),axis=0)
submission_df.to_csv('submission_median.csv', index=False)
# Ensemble Folds with Median and Round Prediction
# (snap predictions onto the discrete pressure grid recorded earlier)
submission_df['pressure'] = np.mean(np.vstack(test_preds),axis=0)
submission_df['pressure'] = np.round((submission_df['pressure'] - PRESSURE_MIN)/PRESSURE_STEP) * PRESSURE_STEP + PRESSURE_MIN
submission_df['pressure'] = np.clip(submission_df['pressure'], PRESSURE_MIN, PRESSURE_MAX)
submission_df.to_csv('submission_median_round.csv', index=False)
# Nearest Neighbor Method
## Mean
submission_df['pressure'] = np.mean(np.vstack(test_preds),axis=0)
submission_df['pressure'] = submission_df['pressure'].map(lambda x: pressure_unique[np.abs(pressure_unique - x).argmin()])
submission_df.to_csv('submission_nn_mean.csv', index=False)
## Median
submission_df['pressure'] = np.median(np.vstack(test_preds),axis=0)
submission_df['pressure'] = submission_df['pressure'].map(lambda x: pressure_unique[np.abs(pressure_unique - x).argmin()])
submission_df.to_csv('submission_nn_median.csv', index=False)
# Mean-Median Method
# Source: https://www.kaggle.com/c/ventilator-pressure-prediction/discussion/282735
def better_than_median(inputs, spread_lim = None, axis=0):
    """
    Compute the mean of the predictions if there are no outliers,
    or the median if there are outliers.
    Parameter: inputs = ndarray of shape (n_samples, n_folds)

    NOTE(review): spread_lim has no usable default — calling without it
    compares against None and raises; always pass an explicit threshold.
    """
    spread = inputs.max(axis=axis) - inputs.min(axis=axis)
    print(f"Inliers: {(spread < spread_lim).sum():7} -> compute mean")
    print(f"Outliers: {(spread >= spread_lim).sum():7} -> compute median")
    print(f"Total: {len(inputs):7}")
    return np.where(spread < spread_lim, np.mean(inputs, axis=axis), np.median(inputs, axis=axis))
submission_df['pressure'] = better_than_median(np.vstack(test_preds), spread_lim = 0.50)
submission_df.to_csv('submission_mixed_50.csv', index=False)
```
github_jupyter
# Initialization
```
# Propellant trade-study notebook for the NOELLE rocket motor, built on the
# project package `noelle` plus RocketCEA for chemical-equilibrium data.
%load_ext autoreload
%autoreload 2
# Noelle
from noelle import Motor, Fluid, FluidMixture
# Numpy
import numpy as np
# Matplotlib
from matplotlib import pyplot as plt
import matplotlib
import matplotlib as mpl
from labellines import labelLine, labelLines
# RocketCEA
from rocketcea.biprop_utils.rho_isp_plot_obj import RhoIspPlot
# Configure plot styles
# Sizes
mpl.rcParams['figure.figsize'] = [12.0, 6.0]
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['savefig.dpi'] = 120
# Font
# NOTE(review): 'normal' is not a real font family name — matplotlib will warn
# and fall back to its default.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 22}
matplotlib.rc('font', **font)
# Style ('science' requires the SciencePlots package to be installed)
plt.style.use(['science'])
```
# Fuel and Oxidizer Definitions
```
# Oxidizers (pressures in Pa, temperatures in K — presumably; confirm against
# the noelle.Fluid API)
LOX = Fluid(name='O2(L)', coolprop_name='oxygen', formula='O 2', fluid_type='oxidizer', storage_pressure=35e5, storage_temperature=90.17)
GOX = Fluid(name='O2(G)', coolprop_name='oxygen', formula='O 2', fluid_type='oxidizer', storage_pressure=35e5, storage_temperature=298.15)
NOX = Fluid(name='N2O', coolprop_name='NitrousOxide', formula=None, fluid_type='oxidizer', storage_temperature=298.15)
oxidizers_list = [GOX, LOX, NOX]
# Fuels
H2O = Fluid(name='H2O(L)', coolprop_name='water', formula='H 2 O 1', fluid_type='fuel', storage_pressure=35e5, storage_temperature=298.15)
LCH4 = Fluid(name='CH4(L)', coolprop_name='methane', formula='C 1 H 4', fluid_type='fuel', storage_pressure=1e5)
GCH4 = Fluid(name='CH4(G)', coolprop_name='methane', formula='C 1 H 4', fluid_type='fuel', storage_pressure=35e5, storage_temperature=298.15)
LC2H5OH = Fluid(name='C2H5OH(L)', coolprop_name='ethanol', formula='C 2 H 6 O 1', fluid_type='fuel', storage_pressure=35e5, storage_temperature=298.15)
# JetA has no CoolProp backend, so density/enthalpy are supplied explicitly.
JetA = Fluid(name='JetA', coolprop_name=None, formula=None, fluid_type='fuel', storage_pressure=35e5, storage_temperature=298.15, storage_density=815, storage_enthalpy=72466.6)
# Fuel Blend: 70% ethanol / 30% water by the x1/x2 fractions.
# NOTE(review): the identifier spells C2H5OH with a zero ('C2H50H').
H2O_30_C2H50H_70 = FluidMixture(fluid1=LC2H5OH, x1=70, fluid2=H2O, x2=30)
fuels_list = [LCH4, GCH4, LC2H5OH, JetA, H2O_30_C2H50H_70]
```
# Main Motor Parameters for Selected Propellants
```
# Baseline motor: N2O + 70/30 ethanol-water blend.
NOELLE = Motor(
    NOX,
    H2O_30_C2H50H_70,
    thrust = 1500,
    burn_time = 10,
    p_chamber = 35,
    n_cstar = 1,
    n_cf = 1,
    cd_ox = 0.6,
    cd_fuel = 0.182,
    suboptimal = 1
)
NOELLE.report()
```
# Fuel and Oxidizers Comparison
Fuel List:
- CH4 (L) - LNG
- CH4 (G) - NG
- C2H6O (L) - Ethanol
- C12H23 (L) - JetA - Kerosene
Oxidizer List:
- O2 (L)
- O2 (G)
- NO2 (L-G)
## Oxidizer Fuel Combinations
```
# Combinations: cartesian product of every oxidizer with every fuel.
import itertools
oxidizer_fuel_combinations = list(itertools.product(oxidizers_list, fuels_list))
print("Number of combinations:", len(oxidizer_fuel_combinations))
for combination in oxidizer_fuel_combinations:
    print("-----------"+str(combination)+"-----------")
    test_motor = Motor(
        combination[0],
        combination[1],
        thrust = 1000,
        burn_time = 10,
        p_chamber = 35,
        n_cstar = 0.885,
        n_cf = 0.95,
        cd_ox = 0.4,
        cd_fuel = 0.4
    )
    test_motor.report()
# Combinations — one-line summary per pair instead of a full report
import itertools
oxidizer_fuel_combinations = list(itertools.product(oxidizers_list, fuels_list))
for combination in oxidizer_fuel_combinations:
    # Extract fuel and oxidizer
    oxidizer = combination[0]
    fuel = combination[1]
    # Create test motor
    test_motor = Motor(oxidizer, fuel, thrust = 1000, burn_time = 10, p_chamber = 35, n_cstar = 0.885, n_cf = 0.95, cd_ox = 0.4, cd_fuel = 0.4)
    print("Test Motor - Oxidizer: *"+str(oxidizer)+"* | Fuel: *"+str(fuel)+"* | Isp: *{:0.1f}* s | Iv: *{:0.1f}* 10^3 Ns/m³ | c*: {:0.1f} m/s".format(test_motor.Isp, test_motor.Iv/100, test_motor.cstar))
```
## $I_{sp}$ and $\rho \cdot I_{sp}$ Comparison
```
# Sweep the `suboptimal` mixture-ratio factor for each pair and record
# (storage density, Isp) curves.
import itertools
oxidizer_fuel_combinations = list(itertools.product(oxidizers_list, fuels_list))
results_x = []
results_y = []
for combination in oxidizer_fuel_combinations:
    sub_result_x = []
    sub_result_y = []
    # Extract fuel and oxidizer
    oxidizer = combination[0]
    fuel = combination[1]
    for suboptimal in np.linspace(0.7, 1.3, 20):
        # Create test motor
        test_motor = Motor(oxidizer, fuel, thrust = 1000, burn_time = 10, p_chamber = 35, n_cstar = 0.885, n_cf = 0.95, cd_ox = 0.4, cd_fuel = 0.4, suboptimal = suboptimal)
        sub_result_x.append(test_motor.propellant_storage_density)
        sub_result_y.append(test_motor.Isp)
    results_x.append(sub_result_x)
    results_y.append(sub_result_y)
    print("Test Motor - Oxidizer: *"+str(oxidizer)+"* | Fuel: *"+str(fuel)+"* | Isp: *{:0.1f}* s | Iv: *{:0.1f}* 10^3 Ns/m³".format(test_motor.Isp, test_motor.Iv/100))
plt.figure()
x_mesh = np.linspace(1, 1200, 1200)
# for i in range(40, 260, 20):
#     plt.plot(x_mesh, 1000*i/x_mesh, color='grey', linestyle='--', label=str(i))
# labelLines(plt.gca().get_lines(),zorder=2.5)
# Solid lines: selected pairs; dashed: the remainder. Indices follow the
# itertools.product ordering above.
a5, = plt.plot(results_x[4], results_y[4], label=str(oxidizer_fuel_combinations[4])[1:-1])
a1, = plt.plot(results_x[6], results_y[6], label=str(oxidizer_fuel_combinations[6])[1:-1])
a2, = plt.plot(results_x[7], results_y[7], label=str(oxidizer_fuel_combinations[7])[1:-1])
a3, = plt.plot(results_x[8], results_y[8], label=str(oxidizer_fuel_combinations[8])[1:-1])
a5, = plt.plot(results_x[10], results_y[10], label=str(oxidizer_fuel_combinations[10])[1:-1])
a6, = plt.plot(results_x[11], results_y[11], label=str(oxidizer_fuel_combinations[11])[1:-1])
a1, = plt.plot(results_x[0], results_y[0], '--', label=str(oxidizer_fuel_combinations[0])[1:-1])
a2, = plt.plot(results_x[1], results_y[1], '--', label=str(oxidizer_fuel_combinations[1])[1:-1])
a3, = plt.plot(results_x[2], results_y[2], '--', label=str(oxidizer_fuel_combinations[2])[1:-1])
a4, = plt.plot(results_x[3], results_y[3], '--', label=str(oxidizer_fuel_combinations[3])[1:-1])
a6, = plt.plot(results_x[5], results_y[5], '--', label=str(oxidizer_fuel_combinations[5])[1:-1])
a4, = plt.plot(results_x[9], results_y[9], '--', label=str(oxidizer_fuel_combinations[9])[1:-1])
# plt.xlim(650, 1100)
# plt.ylim(190, 250)
plt.xlabel('Propellant Storage Density (kg/m³)')
plt.ylabel('Specific Impulse (s)')
plt.title('Specific Impulse vs. Density')
# plt.legend()
plt.grid()
plt.show()
# Export results to files
# plt.savefig("output.png", bbox_inches="tight")
# np.savetxt('I_sp_x.csv', results_x, delimiter=',')
# np.savetxt('I_sp_y.csv', results_y, delimiter=',')
# RocketCEA density-vs-Isp contour plot for the same propellant pairs.
rp = RhoIspPlot(dpi=120, bipropL=oxidizer_fuel_combinations, Pc=500, eps=2.4)
rp.add_rho_isp_contours(label_frac_pos=0.4)
rp.show()
```
github_jupyter
# Welcome to Reinforcement Learning Reinforcement Learning is a framework for tackling **sequential decision problems**: what to do next in order to maximize a reward (which might be delayed), on a changing universe (which might react to our actions). Concrete examples include: - Game playing: which actions are critical to win a game? This could be [Atari](http://karpathy.github.io/2016/05/31/rl/) or [Go](https://en.wikipedia.org/wiki/AlphaZero). - Learning in a "small world": what actions maximize pleasure / minimize pain? - [Treatment of chronic diseases](https://www.ncbi.nlm.nih.gov/pubmed/19731397): how to evolve the treatment for a disease that creates resistance? The unifying theme on the problems above can be abstracted as follows: - An **agent** receives a signal from the environment, selected by Nature. - The agent executes an **action**. - Given the agents' action, Nature assigns a reward and draws a new state, which is announced to the agent. - The situation repeats until a terminal criterion is reached. We will use the OpenAI Gym [(https://github.com/openai/gym)](https://github.com/openai/gym). environment for this. It consists of a number of Atari environments that we can use for experimenting. If you haven't please install the library OpenAI gym (`pip install gym`). To test your installation, run the following script: ```python import gym env = gym.make('CartPole-v0') env.reset() for _ in range(1000): env.render() env.step(env.action_space.sample()) # take a random action ``` You should see a window that opens with a car and a pole, and will most likely close quickly. ## A walk on the Frozen Lake We will start with a very simple environment, called `FrozenLake`. ``` !pip install -q tqdm #from tqdm import tqdm import gym #create a single game instance env = gym.make("FrozenLake-v0") ``` Here, **S** is the initial state, and your aim is to reach **G**, without falling into the holes, **H**. 
The squares marked with **F** are frozen, which means you can step on them. **Note:** The environment is non-deterministic, you can slip in the ice and end up in a different state. How to use the environment? - **reset()** returns the initial state / first observation. - **render()** returns the current environment state. - **step(a)** returns what happens after action a: - *new observation*: the new state. - *reward*: the reward corresponding to that action in that state. - *is done*: binary flag, True if the game is over. - *info*: Some auxiliary stuff, which we can ignore now. ``` print("The initial state: ", env.reset()) print(" and it looks like: ") env.render() print("Now let's take an action: ") env.reset() new_state, reward, done, _ = env.step(1) env.render() idx_to_action = { 0:"<", #left 1:"v", #down 2:">", #right 3:"^" #up } ``` A **policy** is a function from states to actions. It tells us what we should do on each state. In this case, an array of size 16 with entries 0,1,2 or 3 determines a **deterministic** policy, whereas an array of size 16x4 with entries between 0 and 1 and where each row sums 1 determines a **stochastic** policy. For simplicity, we will implement policies as dictionaries for `FrozenLake`. ``` import numpy as np n_states = env.observation_space.n n_actions = env.action_space.n # Initialize random_policy: def init_random_policy(): random_policy = {} for state in range(n_states): random_policy[state] = np.random.choice(n_actions) return random_policy ``` We need now to define a function to evaluate our policy. 
``` def evaluate(env, policy, max_episodes=100): tot_reward = 0 for ep in range(max_episodes): state = env.reset() done = False ep_reward = 0 # Reward per episode while not done: action = policy[state] new_state, reward, done, _ = env.step(action) ep_reward += reward state = new_state if done: tot_reward += ep_reward return tot_reward/(max_episodes) ``` ### Looking for the best policy: Random search As a very first example, let's try to find our policy by pure random search: we will play for some time and keep track of the best actions we can do on each state. `FrozenLake` defines "solving" as getting average reward of 0.78 over 100 consecutive trials ``` best_policy = None best_score = -float('inf') # Random search for i in range(1,10000): #tip: you can use tqdm(range(1,10000)) for a progress bar policy = init_random_policy() score = evaluate(env,policy,100) if score > best_score: best_score = score best_policy = policy if i%5000 == 0: print("Best score:", best_score) print("Best policy:") #print(best_policy) ``` Now let's see the policy in action: ``` def play(env,policy, render=False): s = env.reset() d = False while not d: a = policy[s] print("*"*10) print("State: ",s) print("Action: ",idx_to_action[a]) s, r, d, _ = env.step(a) if render: env.render() if d: print(r) ``` Let's create a small function to print a nicer policy: ``` def print_policy(policy): lake = "SFFFFHFHFFFHHFFG" arrows = [idx_to_action[policy[i]] if lake[i] in 'SF' \ else '*' for i in range(n_states)] for i in range(0,16,4): print(''.join(arrows[i:i+4])) ``` We can call then use the functions above to render the optimal policy. Note that the policy might not give the optimal action: recall that there is some noise involved (you can slip on the ice) which is responsible of a non-optimal action looking like optimal. ``` print_policy(best_policy) #play(env,best_policy) ``` ## Using a different policy Let's try some different implementation of a random policy, which will be more useful later on. 
``` # theta = 0.25*np.ones((n_states,n_actions)) def random_parameter_policy(theta): theta = theta/np.sum(theta, axis=1, keepdims=True) # ensure that the array is normalized policy = {} probs = {} for state in range(n_states): probs[state] = np.array(theta[state,:]) policy[state] = np.random.choice(n_actions, p = probs[state]) return policy best_policy = None best_score = -float('inf') alpha = 1e-2 theta = 0.25*np.ones((n_states,n_actions)) # Random search for i in range(1,10000): policy = random_parameter_policy(theta) score = evaluate(env,policy,100) if score > best_score: best_score = score best_policy = policy theta = theta + alpha*np.random.rand(n_states,n_actions) if i%5000 == 0: print("Best score:", best_score) #print("Best policy:") #print(best_policy) #print("Best score:", best_score) ``` What's the advantage of this? Perhaps not much right now, but this is the first step to use more sophisticated techniques over random search. Note that we do a "gradient update" of sorts when we change the parameter `theta` in the direction of increase of the best score. This hints that we could use other update rules, perhaps taking the output as a more sophisticated input of the game history. Another thing to notice is that we made effectively our policy **stochastic**: at every stage the agent has the possibility of choosing randomly his action. This has the effect of smoothing out the problem: we are now solving an optimization problem on a continuous, instead of a discrete space. ## Your turn: - Beat the hill climbing / random search benchmark! Implement a different search method for the parameters. - Try the `CartPole` environment. In `CartPole`, the state is continuous (4 different parameters), so you need to do something on the lines of the parameterized random search example. Look at http://kvfrans.com/simple-algoritms-for-solving-cartpole/ for inspiration.
github_jupyter
# TrackML Kubeflow Pipeline This notebook assumes that you have already set up a GKE cluster with Kubeflow installed. Currently, this notebook must be run from the Kubeflow JupyterHub installation. In this notebook, we will show how to: * Interactively define a Kubeflow Pipeline using the Pipelines Python SDK * Submit and run the pipeline ## Setup Do some imports and set some variables. Set the `WORKING_DIR` to a path under the Cloud Storage bucket you created earlier. ``` import kfp # the Pipelines SDK. This library is included with the notebook image. from kfp import compiler import kfp.dsl as dsl import kfp.gcp as gcp import kfp.notebook KUBECTL_IMAGE = "gcr.io/mcas-195423/trackml_master_kfp_kubectl" KUBECTL_IMAGE_VERSION = "1" TRACKML_IMAGE = "gcr.io/mcas-195423/trackml_master_trackml" TRACKML_IMAGE_VERSION = "1" ``` ## Create an *Experiment* in the Kubeflow Pipeline System The Kubeflow Pipeline system requires an "Experiment" to group pipeline runs. You can create a new experiment, or call `client.list_experiments()` to get existing ones. ``` # Note that this notebook should be running in JupyterHub in the same cluster as the pipeline system. # Otherwise, additional config would be required to connect. client = kfp.Client() client.list_experiments() exp = client.create_experiment(name='trackml_notebook') # If the previous line throws an error, try uncommenting this one, which fetches an existing experiment: # exp = client.get_experiment(experiment_name='trackml_notebook') ``` ## Define a Pipeline Authoring a pipeline is like authoring a normal Python function. The pipeline function describes the topology of the pipeline. Each step in the pipeline is typically a `ContainerOp` --- a simple class or function describing how to interact with a docker container image. In the pipeline, all the container images referenced in the pipeline are already built. The pipeline starts by training a model. 
When it finishes, it exports the model in a form suitable for serving by [TensorFlow serving](https://github.com/tensorflow/serving/). The next step deploys a TF-serving instance with that model. The last step generates a results file. ``` def train_op(): return dsl.ContainerOp( name='train', image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION), command=["python"], arguments=["train.py"], ).apply(gcp.use_gcp_secret() )#.set_gpu_limit(1) def serve_op(): return dsl.ContainerOp( name='serve', image="{}:{}".format(KUBECTL_IMAGE, KUBECTL_IMAGE_VERSION), arguments=[ "/src/set_kubectl.sh", "--namespace", "kubeflow", "--command", "apply -f /src/k8s/serve.yaml", ] ).apply(gcp.use_gcp_secret()) def resultsgen_op(): return dsl.ContainerOp( name='resultsgen', image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION), command=["python"], arguments=["resultsgen.py"], ).apply(gcp.use_gcp_secret()) @dsl.pipeline( name='trackml', description='A pipeline that predicts particle tracks' ) def trackml(): train = train_op() serve = serve_op() serve.after(train) resultsgen = resultsgen_op() resultsgen.after(serve) ``` ## Submit an experiment *run* ``` compiler.Compiler().compile(trackml, 'trackml.tar.gz') ``` The call below will run the compiled pipeline. ``` run = client.run_pipeline(exp.id, 'trackml', 'trackml.tar.gz') ```
github_jupyter
# 基于 GCN 的有监督学习 图神经网络(GNN)结合了图结构和机器学习的优势. GraphScope提供了处理学习任务的功能。本次教程,我们将会展示GraphScope如何使用GCN算法训练一个模型。 本次教程的学习任务是在文献引用网络上的点分类任务。在点分类任务中,算法会确定[Cora](https://linqs.soe.ucsc.edu/data)数据集上每个顶点的标签。在```Cora```数据集中,由学术出版物作为顶点,出版物之间的引用作为边,如果出版物A引用了出版物B,则图中会存在一条从A到B的边。Cora数据集中的节点被分为了七个主题类,我们的模型将会训练来预测出版物顶点的主题。 在这一任务中,我们使用图聚合网络(GCN)算法来训练模型。有关这一算法的更多信息可以参考["Knowing Your Neighbours: Machine Learning on Graphs"](https://medium.com/stellargraph/knowing-your-neighbours-machine-learning-on-graphs-9b7c3d0d5896) 这一教程将会分为以下几个步骤: - 建立会话和载图 - 启动GraphScope的学习引擎,并将图关联到引擎上 - 使用内置的GCN模型定义训练过程,并定义相关的超参 - 开始训练 首先,我们要新建一个会话,并载入数据 ``` import os import graphscope k8s_volumes = { "data": { "type": "hostPath", "field": { "path": "/testingdata", "type": "Directory" }, "mounts": { "mountPath": "/home/jovyan/datasets", "readOnly": True } } } # 开启会话 graphscope.set_option(show_log=True) sess = graphscope.session(k8s_volumes=k8s_volumes) # 载入cora图数据 graph = sess.g() graph = graph.add_vertices("/home/jovyan/datasets/cora/node.csv", "paper") graph = graph.add_edges("/home/jovyan/datasets/cora/edge.csv", "cites") ``` 然后,我们需要定义一个特征列表用于图的训练。训练特征集合必须从点的属性集合中选取。在这个例子中,我们选择了属性集合中所有以"feat_"为前缀的属性作为训练特征集,这一特征集也是Cora数据中点的特征集。 借助定义的特征列表,接下来,我们使用会话的`learning`方法来开启一个学习引擎。(`learning`方法的文档可参考[Session](https://graphscope.io/docs/reference/session.html)) 在这个例子中,我们在`learning`方法中,指定在数据中`paper`类型的顶点和`cites`类型边上进行模型训练。 利用`gen_labels`参数,我们将`paper`点数据集进行划分,其中75%作为训练集,10%作为验证集,15%作为测试集。 ``` # define the features for learning paper_features = [] for i in range(1433): paper_features.append("feat_" + str(i)) # launch a learning engine. 
lg = sess.learning(graph, nodes=[("paper", paper_features)], edges=[("paper", "cites", "paper")], gen_labels=[ ("train", "paper", 100, (0, 75)), ("val", "paper", 100, (75, 85)), ("test", "paper", 100, (85, 100)) ]) ``` 这里我们使用内置的GCN模型定义训练过程。你可以在[Graph Learning Model](https://graphscope.io/docs/learning_engine.html#data-model)获取更多内置学习模型的信息。 在本次示例中,我们使用tensorflow作为NN后端训练器。 ``` from graphscope.learning.examples import GCN from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer # supervised GCN. def train(config, graph): def model_fn(): return GCN( graph, config["class_num"], config["features_num"], config["batch_size"], val_batch_size=config["val_batch_size"], test_batch_size=config["test_batch_size"], categorical_attrs_desc=config["categorical_attrs_desc"], hidden_dim=config["hidden_dim"], in_drop_rate=config["in_drop_rate"], neighs_num=config["neighs_num"], hops_num=config["hops_num"], node_type=config["node_type"], edge_type=config["edge_type"], full_graph_mode=config["full_graph_mode"], ) trainer = LocalTFTrainer( model_fn, epoch=config["epoch"], optimizer=get_tf_optimizer( config["learning_algo"], config["learning_rate"], config["weight_decay"] ), ) trainer.train_and_evaluate() # define hyperparameters config = { "class_num": 7, # output dimension "features_num": 1433, "batch_size": 140, "val_batch_size": 300, "test_batch_size": 1000, "categorical_attrs_desc": "", "hidden_dim": 128, "in_drop_rate": 0.5, "hops_num": 2, "neighs_num": [5, 5], "full_graph_mode": False, "agg_type": "gcn", # mean, sum "learning_algo": "adam", "learning_rate": 0.01, "weight_decay": 0.0005, "epoch": 5, "node_type": "paper", "edge_type": "cites", } ``` 在定义完训练过程和超参后,现在我们可以使用学习引擎和定义的超参开始训练过程。 ``` train(config, lg) ``` 训练完毕后,需要关掉会话 ``` sess.close() ```
github_jupyter
# Python で気軽に化学・化学工学 # 第 8 章 モデル y = f(x) を構築して、新たなサンプルの y を推定する ## 8.5 モデルの推定性能を低下させる要因とその解決手法(PLS) ## Jupyter Notebook の有用なショートカットのまとめ - <kbd>Esc</kbd>: コマンドモードに移行(セルの枠が青) - <kbd>Enter</kbd>: 編集モードに移行(セルの枠が緑) - コマンドモードで <kbd>M</kbd>: Markdown セル (説明・メモを書く用) に変更 - コマンドモードで <kbd>Y</kbd>: Code セル (Python コードを書く用) に変更 - コマンドモードで <kbd>H</kbd>: ヘルプを表示 - コマンドモードで <kbd>A</kbd>: ひとつ**上**に空のセルを挿入 - コマンドモードで <kbd>B</kbd>: ひとつ**下**に空のセルを挿入 - コマンドモードで <kbd>D</kbd><kbd>D</kbd>: セルを削除 - <kbd>Ctrl</kbd>+<kbd>Enter</kbd>: セルの内容を実行 - <kbd>Shift</kbd>+<kbd>Enter</kbd>: セルの内容を実行して下へ わからないことがありましたら、関係する単語やエラーの文章などでウェブ検索してご自身で調べてみましょう。 ### 沸点のデータセット (descriptors_8_with_boiling_point.csv) Hall and Story が収集した[沸点のデータセット](https://pubs.acs.org/doi/abs/10.1021/ci960375x)。294 個の化合物について、沸点 (Boiling Point) が測定されており、8 つの特徴量 (記述子) で化学構造が数値化されています。記述子は、分子量 (MolWt)、水素原子以外の原子で計算された分子量 (HeavyAtomMolWt)、価電子の数 (NumValenceElectrons)、水素原子以外の原子の数 (HeavyAtomCount)、窒素原子と酸素原子の数 (NOCount)、水素原子と炭素原子以外の原子の数 (NumHeteroatoms)、回転可能な結合の数 (NumRotatableBonds)、環の数 (RingCount) です。 ``` import pandas as pd # pandas のインポート dataset = pd.read_csv('descriptors_8_with_boiling_point.csv', index_col=0, header=0) # 沸点のデータセットの読み込み dataset.shape dataset # 念のため確認 x = dataset.iloc[:, 1:] # 記述子を 説明変数 x とします x # 念のための確認 y = dataset.iloc[:, 0] # 沸点を目的変数 y とします y # 念のため確認 ``` トレーニングデータとテストデータの分割 ``` from sklearn.model_selection import train_test_split # ランダムにトレーニングデータとテストデータとに分割。random_state に数字を与えることで、別のときに同じ数字を使えば、ランダムとはいえ同じ結果にすることができます x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=94, shuffle=True, random_state=99) ``` 下はテキスト化していますが、shuffle=False とすると、ランダムに分割されるのではなく、下から test_size の数のサンプルがテストデータに、残りのサンプルがトレーニングデータになります。時系列データにおいて、時間的に古いサンプルをトレーニングデータに、新しいサンプルをテストデータとしたいときなどに利用します。 ``` #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=94, shuffle=False) x_train.shape x_test.shape y_train.shape y_test.shape ``` 特徴量の標準化 (オートスケーリング) ``` autoscaled_x_train = (x_train - x_train.mean()) / 
x_train.std() # トレーニングデータの説明変数の標準化。平均を引いてから、標準偏差で割ります autoscaled_x_test = (x_test - x_train.mean()) / x_train.std() # テストデータの説明変数の標準化には、トレーニングデータの平均と標準偏差を用いることに注意してください autoscaled_y_train = (y_train - y_train.mean()) / y_train.std() # トレーニングデータの目的変数の標準化 ``` 最小二乗 (Ordinary Least Squares, OLS) 法による線形重回帰分析 ``` from sklearn.linear_model import LinearRegression model = LinearRegression() model.fit(autoscaled_x_train, autoscaled_y_train) # モデルの構築。OLS による線形重回帰分析では、標準回帰係数を計算することに対応します ``` 標準回帰係数 ``` model.coef_ # 標準回帰係数。array 型で出力されます standard_regression_coefficients = pd.DataFrame(model.coef_) # pandas の DataFrame 型に変換 standard_regression_coefficients.index = x_train.columns # 変数に対応する名前を、元のデータの変数名に standard_regression_coefficients.columns = ['standard_regression_coefficients'] # 列名を変更 standard_regression_coefficients # 念のため確認 ``` 例えば HeavyAtomMolWt (水素原子以外で計算された分子量) の標準回帰係数が負に大きい値となっています。水素原子以外で計算された分子量の大きな化合物は沸点が低く推定されるということです。分子量の大きな化合物は沸点が高い傾向がある、という知見と一致しません。このような現象が起きた原因の一つとして、共線性・多重共線性が考えられます。特徴量間の相関係数を見てみましょう。 相関係数の確認 ``` correlation_coefficients = autoscaled_x_train.corr() #相関行列の計算 correlation_coefficients # 相関行列の確認 ``` 相関行列をヒートマップとして確認 ``` import matplotlib.pyplot as plt import seaborn as sns plt.rcParams['font.size'] = 12 # 横軸や縦軸の名前の文字などのフォントのサイズ sns.heatmap(correlation_coefficients, vmax=1, vmin=-1, cmap='seismic', square=True, xticklabels=1, yticklabels=1) plt.xlim([0, correlation_coefficients.shape[0]]) plt.ylim([0, correlation_coefficients.shape[0]]) plt.show() ``` 下のようにすれば、ヒートマップの各領域に相関係数の値を入れられます。 ``` plt.rcParams['font.size'] = 12 # 横軸や縦軸の名前の文字などのフォントのサイズ sns.heatmap(correlation_coefficients, vmax=1, vmin=-1, cmap='seismic', annot=True, square=True, xticklabels=1, yticklabels=1) plt.xlim([0, correlation_coefficients.shape[0]]) plt.ylim([0, correlation_coefficients.shape[0]]) plt.show() ``` HeavyAtomMolWt と相関の非常に高い記述子に、(当たり前ですが) MolWt や NumValenceElectrons や HeavyAtomCount 
があることがわかります。このように、データセットにおける二つ以上の説明変数間の高い相関関係を多重共線性といい、標準回帰係数の値が不適切に正や負に大きくしてしまう原因の一つです。 PLS による回帰分析の実行 ``` from sklearn.cross_decomposition import PLSRegression # PLS モデル構築やモデルを用いた y の値の推定に使用 number_of_components = 2 # 主成分の数 model = PLSRegression(n_components=number_of_components) model.fit(autoscaled_x_train, autoscaled_y_train) # 回帰モデルの構築 ``` 標準回帰係数 ``` model.coef_ # 標準回帰係数。array 型で出力されます standard_regression_coefficients = pd.DataFrame(model.coef_) # pandas の DataFrame 型に変換 standard_regression_coefficients.index = x_train.columns # 説明変数に対応する名前を、元のデータの説明変数名に standard_regression_coefficients.columns = ['standard_regression_coefficients'] # 列名を変更 standard_regression_coefficients # 念のため確認 ``` 例えば 主成分の数が 2 のとき、MolWt (分子量) や HeavyAtomMolWt (水素原子以外で計算された分子量) をはじめとして、すべての標準回帰係数が正の値になっています。大きい分子ほど、各記述子の値も大きくなる傾向があることから、大きな分子の化合物は沸点が高い傾向がある、という知見と一致します。 ``` standard_regression_coefficients.to_csv('standard_regression_coefficients.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の値の推定 ``` model.predict(autoscaled_x_train) #トレーニングデータの y の値を推定。array 型で出力されます estimated_y_train = pd.DataFrame(model.predict(autoscaled_x_train)) # pandas の DataFrame 型に変換 estimated_y_train = estimated_y_train * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_train.index = x_train.index # サンプル名を、元のデータのサンプル名に estimated_y_train.columns = ['estimated_y'] # 列名を変更 estimated_y_train # 念のため確認 estimated_y_train.to_csv('estimated_y_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の実測値 vs. 
推定値プロット ``` import matplotlib.pyplot as plt import matplotlib.figure as figure # 図の調整に使用 plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_train, estimated_y_train.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` トレーニングデータの r<sup>2</sup>, MAE ``` from sklearn import metrics metrics.r2_score(y_train, estimated_y_train) # r2 metrics.mean_absolute_error(y_train, estimated_y_train) # MAE ``` テストデータの y の値の推定。トレーニングデータをテストデータに変えるだけで、実行する内容はトレーニングデータのときと同じです ``` model.predict(autoscaled_x_test) # テストデータの y の値を推定。array 型で出力されます estimated_y_test = pd.DataFrame(model.predict(autoscaled_x_test)) # pandas の DataFrame 型に変換 estimated_y_test = estimated_y_test * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_test.index = x_test.index # サンプル名を、元のデータのサンプル名に estimated_y_test.columns = ['estimated_y'] # 列名を変更 estimated_y_test # 念のため確認 estimated_y_test.to_csv('estimated_y_test.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` テストデータの y の実測値 vs. 
推定値プロット ``` plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_test, estimated_y_test.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` テストデータの r<sup>2</sup>, MAE ``` metrics.r2_score(y_test, estimated_y_test) # r2 metrics.mean_absolute_error(y_test, estimated_y_test) # MAE ``` 主成分の数を変えて、それぞれの PLS モデルの標準回帰係数や推定性能を評価してみましょう。 OLS と PLS とで、回帰モデルの標準回帰係数や推定性能を比較してみましょう。 ## OLS でオーバーフィッティングが起こり、PLS でそれが軽減される例 ### 医薬品錠剤の NIR スペクトルのデータセット (shootout_2002.csv) 2002 年にInternational Diffuse Reflectance Conference (IDRC) が公開した[錠剤の NIR スペクトルのデータセット](http://www.idrc-chambersburg.org/shootout_2002.htm)。460 個の錠剤について、目的変数は錠剤中の有効成分 (Active Pharmaceutical Ingredient, API) の含量 [mg] であり、説明変数は波長 600, 602, ..., 1898 nm で計測された NIR (Near-InfraRed) スペクトル (FOSS NIRSystems Multitab Analysers) 650変数です。 ``` import pandas as pd # pandas のインポート dataset = pd.read_csv('shootout_2002.csv', index_col=0, header=0) # 沸点のデータセットの読み込み dataset.shape # データセットのサンプル数、特徴量の数の確認 dataset # 念のため確認 x = dataset.iloc[:, 1:] # スペクトルの特徴量を説明変数 x とします x # 念のための確認 y = dataset.iloc[:, 0] # APIを目的変数 y とします y # 念のため確認 ``` トレーニングデータとテストデータの分割 ``` from sklearn.model_selection import train_test_split # ランダムにトレーニングデータとテストデータとに分割。今回はテストデータのサンプル数を 115 とします x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=115, 
shuffle=True, random_state=99) ``` 特徴量の標準化 (オートスケーリング) ``` autoscaled_x_train = (x_train - x_train.mean()) / x_train.std() # トレーニングデータの説明変数の標準化。平均を引いてから、標準偏差で割ります autoscaled_x_test = (x_test - x_train.mean()) / x_train.std() # テストデータの説明変数の標準化には、トレーニングデータの平均と標準偏差を用いることに注意してください autoscaled_y_train = (y_train - y_train.mean()) / y_train.std() # トレーニングデータの目的変数の標準化 ``` 最小二乗 (Ordinary Least Squares, OLS) 法による線形重回帰分析 ``` from sklearn.linear_model import LinearRegression model = LinearRegression() model.fit(autoscaled_x_train, autoscaled_y_train) # モデルの構築。OLS による線形重回帰分析では、標準回帰係数を計算することに対応します standard_regression_coefficients = pd.DataFrame(model.coef_) # pandas の DataFrame 型に変換 standard_regression_coefficients.index = x_train.columns # 変数に対応する名前を、元のデータの変数名に standard_regression_coefficients.columns = ['standard_regression_coefficients'] # 列名を変更 standard_regression_coefficients # 念のため確認 standard_regression_coefficients.to_csv('standard_regression_coefficients_ols.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の値の推定 ``` estimated_y_train = pd.DataFrame(model.predict(autoscaled_x_train)) # 推定し、pandas の DataFrame 型に変換 estimated_y_train = estimated_y_train * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_train.index = x_train.index # サンプル名を、元のデータのサンプル名に estimated_y_train.columns = ['estimated_y'] # 列名を変更 estimated_y_train # 念のため確認 estimated_y_train.to_csv('estimated_y_train_ols.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の実測値 vs. 
推定値プロット ``` import matplotlib.pyplot as plt import matplotlib.figure as figure # 図の調整に使用 plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_train, estimated_y_train.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` トレーニングデータの r<sup>2</sup>, MAE ``` from sklearn import metrics metrics.r2_score(y_train, estimated_y_train) # r2 metrics.mean_absolute_error(y_train, estimated_y_train) # MAE ``` 実測値 vs. 推定値プロットではサンプルは対角線上にあり、r<sup>2</sup> は 1、MAE はほとんど 0 です。x によって y を完璧に説明できていますが、API の測定結果やスペクトルの測定結果にはノイズが含まれているはずであり、そのような結果は不自然と考えられます。テストデータの y の値を推定して確認してみます。 ``` estimated_y_test = pd.DataFrame(model.predict(autoscaled_x_test)) # 推定して、pandas の DataFrame 型に変換 estimated_y_test = estimated_y_test * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_test.index = x_test.index # サンプル名を、元のデータのサンプル名に estimated_y_test.columns = ['estimated_y'] # 列名を変更 estimated_y_test # 念のため確認 estimated_y_test.to_csv('estimated_y_test_ols.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` テストデータの y の実測値 vs. 
推定値プロット ``` plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_test, estimated_y_test.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` テストデータの r<sup>2</sup>, MAE ``` metrics.r2_score(y_test, estimated_y_test) # r2 metrics.mean_absolute_error(y_test, estimated_y_test) # MAE ``` 実測値 vs. 推定値のプロットや MAE の値から、トレーニングデータにおける推定誤差と比較して、テストデータにおける推定誤差が大きいことが確認できます。OLS モデルがトレーニングんデータにオーバーフィッティングしていると考えられます。その原因を検討するため、説明変数の間の相関係数を確認します。 ``` correlation_coefficients = autoscaled_x_train.corr() #相関行列の計算 correlation_coefficients # 相関行列の確認 ``` 相関行列をヒートマップとして確認 (プロットに少し時間がかかります) ``` import matplotlib.pyplot as plt import seaborn as sns plt.rcParams['font.size'] = 12 # 横軸や縦軸の名前の文字などのフォントのサイズ sns.heatmap(correlation_coefficients, vmax=1, vmin=-1, cmap='seismic', square=True, xticklabels=1, yticklabels=1) plt.xlim([0, correlation_coefficients.shape[0]]) plt.ylim([0, correlation_coefficients.shape[0]]) plt.show() ``` 説明変数の数が多いため、横軸と縦軸の目盛りは見えにくいですが、ヒートマップから判断すると、特に波長の近い特徴量の間に、とても高い相関関係 (相関係数が 0.99 を超える相関関係) があることがわかります。このように、特徴量の間に強い共線性があると、OLS による線形重回帰分析ではモデルがオーバフィッティングしてしまい、新しいサンプルに対する予測精度が低くなることが多いです。 次に、PLS による線形重回帰分析を行います。 PLS による回帰分析の実行 ``` from sklearn.cross_decomposition import PLSRegression # PLS モデル構築やモデルを用いた y の値の推定に使用 number_of_components = 5 # 主成分の数 ``` 今回は主成分の数をとりあえず 5 
としています。最適な主成分数の決め方は 8.6 節で扱います。 ``` model = PLSRegression(n_components=number_of_components) model.fit(autoscaled_x_train, autoscaled_y_train) # 回帰モデルの構築 ``` 標準回帰係数 ``` standard_regression_coefficients = pd.DataFrame(model.coef_) # pandas の DataFrame 型に変換 standard_regression_coefficients.index = x_train.columns # 変数に対応する名前を、元のデータの変数名に standard_regression_coefficients.columns = ['standard_regression_coefficients'] # 列名を変更 standard_regression_coefficients # 念のため確認 ``` OLS における標準回帰係数の絶対値と比較して、絶対値が小さい傾向があります。PLS モデルがオーバーフィッティングしている可能性が、OLS モデルと比較して低いと考えられます。 ``` standard_regression_coefficients.to_csv('standard_regression_coefficients_pls.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の値の推定 ``` estimated_y_train = pd.DataFrame(model.predict(autoscaled_x_train)) # 推定して、pandas の DataFrame 型に変換 estimated_y_train = estimated_y_train * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_train.index = x_train.index # サンプル名を、元のデータのサンプル名に estimated_y_train.columns = ['estimated_y'] # 列名を変更 estimated_y_train # 念のため確認 estimated_y_train.to_csv('estimated_y_train_pls.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` トレーニングデータの y の実測値 vs. 
推定値プロット ``` import matplotlib.pyplot as plt import matplotlib.figure as figure # 図の調整に使用 plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_train, estimated_y_train.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` トレーニングデータの r<sup>2</sup>, MAE ``` from sklearn import metrics metrics.r2_score(y_train, estimated_y_train) # r2 metrics.mean_absolute_error(y_train, estimated_y_train) # MAE ``` OLS におけるトレーニングデータの推定結果と比較すると、r<sup>2</sup> は小さく、MAEは大きいです。また実測値 vs. 推定値のプロットより、対角線から離れているサンプルも見られます。次に、テストデータで検証します。 ``` estimated_y_test = pd.DataFrame(model.predict(autoscaled_x_test)) # 推定して、pandas の DataFrame 型に変換 estimated_y_test = estimated_y_test * y_train.std() + y_train.mean() # スケールをもとに戻します estimated_y_test.index = x_test.index # サンプル名を、元のデータのサンプル名に estimated_y_test.columns = ['estimated_y'] # 列名を変更 estimated_y_test # 念のため確認 estimated_y_test.to_csv('estimated_y_test_pls.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされますので注意してください ``` テストデータの y の実測値 vs. 
推定値プロット ``` plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ plt.figure(figsize=figure.figaspect(1)) # 図の形を正方形に plt.scatter(y_test, estimated_y_test.iloc[:, 0]) # 散布図。estimated_y_train は 200×1 の行列のため、0 列目を選択する必要があります y_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max()) # 実測値の最大値と、推定値の最大値の中で、より大きい値を取得 y_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min()) # 実測値の最小値と、推定値の最小値の中で、より小さい値を取得 plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # 取得した最小値-5%から最大値+5%まで、対角線を作成 plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # y 軸の範囲の設定 plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # x 軸の範囲の設定 plt.xlabel("actual y") # x 軸の名前 plt.ylabel("estimated y") # y 軸の名前 plt.show() # 以上の設定で描画 ``` テストデータの r<sup>2</sup>, MAE ``` metrics.r2_score(y_test, estimated_y_test) # r2 metrics.mean_absolute_error(y_test, estimated_y_test) # MAE ``` テストデータの推定結果を確認すると、OLS と比べて PLS のほうが r<sup>2</sup> は大きく、MAE は小さく、さらに実測値 vs. 推定値のプロットより対角線付近に固まっていることから、OLS より PLS のほうがテストデータの推定結果は高いといえます。PLS では OLS と比較してオーバーフィッティングが軽減されていることが確認できました。今回のデータセットにおいては、回帰モデル構築に用いていないサンプルに対する推定性能の高い PLS のほうが OLS より望ましいです。 自分のデータセットをお持ちの方は、そのデータセットでも今回の内容を確認してみましょう。 前回の仮想的な樹脂材料のデータセット用いた練習問題をぜひ PLS でも行ってみましょう。
github_jupyter
# First BigQuery ML models for Taxifare Prediction In this notebook, we will use BigQuery ML to build our first models for taxifare prediction. BigQuery ML provides a fast way to build ML models on large structured and semi-structured datasets. We'll start by creating a dataset to hold all the models we create in BigQuery ``` !bq mk serverlessml ``` ## Model 1: Raw data Let's build a model using just the raw data. It's not going to be very good, but sometimes it is good to actually experience this. The model will take a minute or so to train. When it comes to ML, this is blazing fast. ``` %%bigquery CREATE OR REPLACE MODEL serverlessml.model1_rawdata OPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') AS SELECT (tolls_amount + fare_amount) AS fare_amount, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers FROM `nyc-tlc.yellow.trips` WHERE MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 1 ``` Once the training is done, visit the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) and look at the model that has been trained. Then, come back to this notebook. Note that BigQuery automatically split the data we gave it, and trained on only a part of the data and used the rest for evaluation. We can look at eval statistics on that held-out data: ``` %%bigquery SELECT * FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata) ``` Let's report just the error we care about, the Root Mean Squared Error (RMSE) ``` %%bigquery SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata) ``` We told you it was not going to be good! Recall that our heuristic got 7.42, and our target is $6. Note that the error is going to depend on the dataset that we evaluate it on. 
We can also evaluate the model on our own held-out benchmark/test dataset, but we shouldn't make a habit of this (we want to keep our benchmark dataset as the final evaluation, not make decisions using it all along the way. If we do that, our test dataset won't be truly independent). ``` %%bigquery SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata, ( SELECT (tolls_amount + fare_amount) AS fare_amount, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers FROM `nyc-tlc.yellow.trips` WHERE MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 2 AND trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 )) ``` ## Model 2: Apply data cleanup Recall that we did some data cleanup in the previous lab. Let's do those before training. This is a dataset that we will need quite frequently in this notebook, so let's extract it first. ``` %%bigquery CREATE OR REPLACE TABLE serverlessml.cleaned_training_data AS SELECT (tolls_amount + fare_amount) AS fare_amount, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers FROM `nyc-tlc.yellow.trips` WHERE MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 100000) = 1 AND trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 %%bigquery -- LIMIT 0 is a free query; this allows us to check that the table exists. 
SELECT * FROM serverlessml.cleaned_training_data LIMIT 0 %%bigquery CREATE OR REPLACE MODEL serverlessml.model2_cleanup OPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') AS SELECT * FROM serverlessml.cleaned_training_data %%bigquery SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model2_cleanup) ``` ## Model 3: More sophisticated models What if we throw a more sophisticated model? Let's try boosted trees first: ### xgboost ``` %%bigquery -- These model types are in alpha, so they may not work for you yet. This training takes on the order of 15 minutes. CREATE OR REPLACE MODEL serverlessml.model3a_xgboost OPTIONS(input_label_cols=['fare_amount'], model_type='boosted_tree_regressor') AS SELECT * FROM serverlessml.cleaned_training_data %%bigquery SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model3a_xgboost) ``` Ouch! ### DNN Lately, though, deep neural networks are all the rage. What if we use DNNs instead? ``` %%bigquery -- These model types are in alpha, so they may not work for you yet. This training takes on the order of 15 minutes. CREATE OR REPLACE MODEL serverlessml.model3b_dnn OPTIONS(input_label_cols=['fare_amount'], model_type='dnn_regressor', hidden_units=[32, 8]) AS SELECT * FROM serverlessml.cleaned_training_data %%bigquery SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model3b_dnn) ``` Oh, wow! Later in this sequence of notebooks, we will get to below $4, but this is quite good, for very little work. In this notebook, we showed you how to use BigQuery ML to quickly build ML models. We will come back to BigQuery ML when we want to experiment with different types of feature engineering. The speed of BigQuery ML is very attractive for development. Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
github_jupyter
``` from IPython.display import HTML css_file = './custom.css' HTML(open(css_file, "r").read()) ``` # Gradient Boosted Trees © 2018 Daniel Voigt Godoy ``` from intuitiveml.supervised.regression.GradientBoostedTrees import * from intuitiveml.utils import * ``` ## 1. Definition From the Scikit-Learn [website](https://scikit-learn.org/stable/modules/ensemble.html): The goal of ensemble methods is to combine the predictions of several base estimators built with a given learning algorithm in order to improve generalizability / robustness over a single estimator. Two families of ensemble methods are usually distinguished: In averaging methods, the driving principle is to build several estimators independently and then to average their predictions. On average, the combined estimator is usually better than any of the single base estimator because its variance is reduced. Examples: Bagging methods, Forests of randomized trees, … By contrast, in boosting methods, base estimators are built sequentially and one tries to reduce the bias of the combined estimator. The motivation is to combine several weak models to produce a powerful ensemble. Examples: AdaBoost, Gradient Tree Boosting, … Let's stick with the ***second*** family this time, which ***Gradient Boosted Trees*** are a member of. Once again, the main idea is to train a whole ***bunch of Decision Trees***. But, this time, it is ***not*** going to ***simply average them***. This is not ***bagging***, it is... ### 1.1 (Gradient) Boosting This time, it will start by training ***ONE tree*** on the ***original dataset*** and evaluate its predictions, computing the corresponding ***residuals*** (errors). Then, for the ***NEXT*** tree, it will ***not*** use the ***original dataset*** anymore, but the ***residuals from the previous tree***! So, instead of averaging the predictions of individual trees, it ***adds them up*** to get the final predictions. 
Unlike bagging, which is highly parallelizable, ***boosting*** is a ***sequential*** process. This is ***Gradient Boosting*** in a nutshell! P.S.: There is actually more to it... [XGBoost](https://github.com/dmlc/xgboost) is one of the most popular and succesful algorithms and it uses many more improvements, but the underlying idea is still the same. ## 2. Experiment Time to try it yourself! This time, it is a regression problem! You have 5 data points with values between 1160 and 2000. This is your ***response**. Each point is associated with a single numerical value between 750 and 950. This is your ***feature***. For a regression, the initial step is to compute the ***average*** of all points and use it to compute the ***residuals*** to train the first tree. We'll see the reasoning behind this in the ***Linear Regression*** lesson. The sliders below allow you to train one (shown as zero in the slider) or multiple Decision Trees and choose the maximum depth they are allowed to have. For each new trained tree, it will show both ***residuals from the previous step*** and corresponding ***fitted tree*** on the left. On the right, it will ***add up*** the predictions of all trees up to that one. Use the sliders to play with different configurations and answer the questions below. ``` xreg = np.array([750., 800., 850., 900., 950.]) yreg = np.array([1160., 1200., 1280., 1450., 2000.]) mydtr = plotDecision(x=xreg, y=yreg) vb = VBox(build_figure_boost(mydtr), layout={'align_items': 'center'}) vb ``` #### Questions 1. What happens to the ***level of residuals*** as you increase the number of trees (keeping depth = 1)? 2. What happens if you ***increase the depth*** of the trees? Why? 3. Which one is best to use in GBTs, ***shallow*** or ***deep*** trees? Why ## 3. 
Scikit-Learn [Gradient Tree Boosting](https://scikit-learn.org/stable/modules/ensemble.html#gradient-tree-boosting) Please check Aurelién Geron's "Hand-On Machine Learning with Scikit-Learn and Tensorflow" notebook on Ensemble Methods [here](http://nbviewer.jupyter.org/github/ageron/handson-ml/blob/master/07_ensemble_learning_and_random_forests.ipynb). ## 4. More Resources [Difference between Bagging and Boosting](https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/) [Complete Guide to Parameter Tuning in XGBoost](https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/) [Interpretable Machine Learning with XGBoost](https://towardsdatascience.com/interpretable-machine-learning-with-xgboost-9ec80d148d27) [How to explain gradient boosting](https://explained.ai/gradient-boosting/index.html) [Mastering The New Generation of Gradient Boosting](https://towardsdatascience.com/https-medium-com-talperetz24-mastering-the-new-generation-of-gradient-boosting-db04062a7ea2) #### This material is copyright Daniel Voigt Godoy and made available under the Creative Commons Attribution (CC-BY) license ([link](https://creativecommons.org/licenses/by/4.0/)). #### Code is also made available under the MIT License ([link](https://opensource.org/licenses/MIT)). ``` from IPython.display import HTML HTML('''<script> function code_toggle() { if (code_shown){ $('div.input').hide('500'); $('#toggleButton').val('Show Code') } else { $('div.input').show('500'); $('#toggleButton').val('Hide Code') } code_shown = !code_shown } $( document ).ready(function(){ code_shown=false; $('div.input').hide() }); </script> <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>''') ```
github_jupyter
``` import pickle import numpy as np import tensorflow as tf import matplotlib.pyplot as plt import matplotlib import sys sys.path.insert(0, '../../') import DLDMD as dl import LossDLDMD as lf import Data as dat %matplotlib inline def edmd(x, num_pred): x = tf.transpose(x, perm=[0, 2, 1]) x_m = x[:, :, :-1] x_p = x[:, :, 1:] S, U, V = tf.linalg.svd(x_m, compute_uv=True, full_matrices=False) sm = np.max(S) r = S.shape[-1] Sri = tf.linalg.diag(1./S[:, :r]) Ur = U[:, :, :r] Urh = tf.linalg.adjoint(Ur) Vr = V[:, :, :r] kmat = x_p @ Vr @ Sri @ Urh evals, evecs = tf.linalg.eig(kmat) phim = tf.linalg.solve(evecs, tf.cast(x_m, dtype=tf.complex128)) x0 = phim[:, :, 0] x0 = x0[:, :, tf.newaxis] pred = tf.TensorArray(tf.complex128, size=num_pred) pred = pred.write(0, evecs @ x0) evals_iter = tf.identity(evals) for ii in range(num_pred): tmp = evecs @ tf.linalg.diag(evals_iter) @ x0 pred = pred.write(ii, tmp) evals_iter = evals_iter * evals pred = tf.transpose(tf.squeeze(pred.stack()), perm=[1, 2, 0]) return phim, evals, evecs, pred ``` # Setup ``` # Figure parameters plot_save_path = './analysis_results/' font = {'family': 'DejaVu Sans', 'size': 18} matplotlib.rc('font', **font) fontsize = 18 figsize = (15, 10) dpisave = 300 # Initialize the compute device DEVICE = '/GPU:0' GPUS = tf.config.experimental.list_physical_devices('GPU') if GPUS: try: for gpu in GPUS: tf.config.experimental.set_memory_growth(gpu, True) except RuntimeError as e: print(e) else: DEVICE = '/CPU:0' tf.keras.backend.set_floatx('float64') print("TensorFlow version: {}".format(tf.__version__)) print("Eager execution: {}".format(tf.executing_eagerly())) print("Num GPUs available: {}".format(len(GPUS))) print("Running on device: {}".format(DEVICE)) ``` # Load model and data ``` # SET THIS PATH (w/o file extension!). 
Both '.pkl' and '.h5' files should have same name model_path = './trained_models/duffing_2021-10-04-2358/epoch_10_loss_-0.955' model_hyp_params = model_path + '.pkl' model_weights = model_path + '.h5' # Load the hyper parameters hyp_params = pickle.load(open(model_hyp_params, 'rb')) # Set Tensorflow backend precision tf.keras.backend.set_floatx(hyp_params['precision']) print("Using precision: {}\n".format(tf.keras.backend.floatx())) # Load test data test_data = pickle.load(open('data_test.pkl', 'rb')) print("Test data shape: {}".format(test_data.shape)) # Generate some evenly spaced initial conditions nsteps = int(hyp_params['time_final'] / hyp_params['delta_t']) n_ic = int(40) icond1 = np.ones(shape=(n_ic, 1)) icond1[:20] = icond1[:20]*-1 icond2 = np.linspace(-1, 1, n_ic) data_mat = np.zeros((n_ic, 2, nsteps+1), dtype=np.float64) for ii in range(n_ic): data_mat[ii, :, 0] = np.array([icond1[ii], icond2[ii]], dtype=np.float64) for jj in range(nsteps): data_mat[ii, :, jj+1] = dat.rk4(data_mat[ii, :, jj], hyp_params['delta_t'], dat.dyn_sys_duffing) data = np.transpose(data_mat, [0, 2, 1]) data = tf.cast(data, dtype=hyp_params['precision']) test_data = data print(test_data.shape) # Fix hyper parameters for running the model on test data hyp_params['pretrain'] = False hyp_params['batch_size'] = test_data.shape[0] # Load the trained DLDMD model weights model = dl.DLDMD(hyp_params) model(test_data) model.load_weights(model_weights) # Initialize the loss function loss = lf.LossDLDMD(hyp_params) print("Number of prediction steps: ", model.num_pred_steps) ``` # Run the trained model ``` with tf.device(DEVICE): preds = model(test_data, training=False) losses = loss(preds, test_data) [y, x_ae, x_adv, y_adv, weights, evals, evecs, phi] = preds print("Loss: {loss:2.7f}".format(loss=losses.numpy())) print("Log10 Loss: {loss:2.7f}".format(loss=np.log10(losses.numpy()))) ``` # Run standard DMD ``` # EDMD on the unencoded data [phim, evals, evecs, x_dmd] = edmd(test_data, 
num_pred=test_data.shape[1]) x_dmd = np.real(tf.transpose(x_dmd, perm=[0, 2, 1])) ``` # Visualize results ``` fs = 20 ts = 20 lw = 2.0 ms = 20.0 figsize = (12, 12) skip = 1 # DLDMD reconstruction fig = plt.figure(1, figsize=figsize) for ii in range(0, test_data.shape[0], skip): plt.plot(test_data[ii, :, 0], test_data[ii, :, 1], 'k', linestyle='solid', lw=lw) plt.plot(x_adv[ii, :, 0], x_adv[ii, :, 1], 'k', linestyle='dotted', ms=ms) plt.plot(test_data[ii, :, 0], test_data[ii, :, 1], 'k', linestyle='solid', lw=lw, label='Test data') plt.plot(x_adv[ii, 0, 0], x_adv[ii, 0, 1], 'k', linestyle='dotted', ms=20*ms, label='DLDMD') plt.xlabel(r'$x$', fontsize=fs) plt.ylabel(r'$\dot{x}$', fontsize=fs) plt.legend(fontsize=fs, loc='upper right') plt.axis('equal') ax = plt.gca() ax.tick_params(axis='both', which='major', labelsize=ts) ax.tick_params(axis='both', which='minor', labelsize=ts) # DMD reconstruction fig = plt.figure(2, figsize=figsize) for ii in range(0, test_data.shape[0], skip): plt.plot(test_data[ii, :, 0], test_data[ii, :, 1], 'k', linestyle='solid', lw=lw) plt.plot(x_dmd[ii, :, 0], x_dmd[ii, :, 1], 'k', linestyle='dotted', ms=ms) plt.plot(test_data[ii, :, 0], test_data[ii, :, 1], 'k', linestyle='solid', lw=lw, label='Test data') plt.plot(x_dmd[ii, 0, 0], x_dmd[ii, 0, 1], 'k', linestyle='dotted', ms=20*ms, label='DMD') plt.xlabel(r'$x$', fontsize=fs) plt.ylabel(r'$\dot{x}$', fontsize=fs) plt.legend(fontsize=fs) plt.axis('equal') ax = plt.gca() ax.tick_params(axis='both', which='major', labelsize=ts) ax.tick_params(axis='both', which='minor', labelsize=ts) # Plot trajectories in the latent space skip = 2 fig = plt.figure(2, figsize=figsize) for ii in range(0, test_data.shape[0], skip): plt.plot(test_data[ii, :, 0], test_data[ii, :, 1], 'k', linestyle='solid', lw=lw) plt.xlabel(r'$x$', fontsize=fs) plt.ylabel(r'$\dot{x}$', fontsize=fs) plt.axis('equal') ax = plt.gca() ax.tick_params(axis='both', which='major', labelsize=ts) ax.tick_params(axis='both', 
which='minor', labelsize=ts) from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1, projection='3d') for ii in range(0, y_adv.shape[0], skip): ax.plot3D(y[ii, :, 0], y[ii, :, 1], y[ii, :, 2], 'k', linestyle='solid', lw=lw) ax.set_xlabel(r'$\tilde{x}_{1}$', fontsize=50, labelpad=60.0) ax.set_ylabel(r'$\tilde{x}_{2}$', fontsize=50, labelpad=60.0) ax.set_zlabel(r'$\tilde{x}_{3}$', fontsize=50, labelpad=60.0) ax.tick_params(axis='x', labelsize=40, pad=15) ax.tick_params(axis='y', labelsize=40, pad=15) ax.tick_params(axis='z', labelsize=40, pad=15) ax.set_xticks(np.linspace(-0.05, 0.1, 4)) plt.gca().view_init(20, 160) plt.show() ```
github_jupyter
# Introduction This is an [R Markdown](http://rmarkdown.rstudio.com) Notebook for the analysis of the [bird ringing data Netherlands 1960-1990 part 1, led by Henk van der Jeugd](https://doi.org/10.17026/dans-2ch-6s6r). In an R notebook we can combine text, code and data together. The text is formated using [Markdown](), whereas data and code are located within ` ```{r}``` `blocks. An R Notebook can rely on external libraries. The following block adds the required `knitr` library, as well as some additional ones for data wrangling the calculation of indices. ``` # Data Analysis Libraries library(dplyr) library(tidyr) # [Community Ecology Package](https://cran.r-project.org/web/packages/vegan/index.html) library(vegan) # Visualization Libraries library(ggplot2) ``` # Loading and Cleaning Data For our first step, we will load the data and then view the top records as well as a summary of all variables included. ``` dansDataSet <- read.csv(file = "Export_DANS_Parels_van_Datasets_Vogeltrekstation.csv", header = TRUE) head(dansDataSet) summary(dansDataSet) ``` We observe that even though the data was loaded correctly, they are not used in the best possible way. For example, `Ringnumber`, `CatchDate` and `Age` are used as words rather than as numeric values. Also, missing values are defined as `NULL` which is not recognized as such by R (the correct value would be `NA`). The next block tidies the data, so that that each attribute is treated as originally intended. 
``` dansDataSet <- data.frame(lapply(dansDataSet, function(x) { gsub("NULL", NA, x) })) dansDataSet$Ringnumber <- as.numeric(dansDataSet$Ringnumber) dansDataSet$CatchDate <- as.Date(dansDataSet$CatchDate) dansDataSet$Age <- as.numeric(dansDataSet$Age) dansDataSet$Broodsize <- as.numeric(dansDataSet$Broodsize) dansDataSet$PullusAge <- as.numeric(dansDataSet$PullusAge) dansDataSet$CatchDate <- as.Date(dansDataSet$CatchDate) head(dansDataSet) summary(dansDataSet) ``` We can see that the data is much more better formatted and useful for further analysis. # Subsetting our data Let's now create a few subsets of the original data. Subset #1 `dansDataSet_Castricum` will contain all the unique records for which `Location` is `Castricum, Noord-Holland, NL`. Then we will group the records by species and catch date, and calculate number of each species in the particular catch date. ``` dansDataSet_Castricum <- dansDataSet %>% filter(Location == "Castricum, Noord-Holland, NL") %>% select(unique.RingID = RingID, Species, CatchDate) %>% group_by(Species, CatchDate) %>% summarise(count = n()) ``` We could further filter this subset for a particular species. For example, the code below will retrieve all unique observations of Northern Lapwing in Castricum, Noord-Holland, NL. ``` dansDataSet_lapwing <- dansDataSet %>% filter(Location == "Castricum, Noord-Holland, NL") %>% select(unique.RingID = RingID, Species, CatchDate) %>% group_by(Species, CatchDate) %>% filter(as.POSIXct(CatchDate) >= as.POSIXct("1970-01-01 00:00:01")) %>% filter(Species == "Northern Lapwing") %>% summarise(count = n()) ``` Our second subset will create a matrix of the distribution of unique species across the different locations. This will consequently allow us to calculate some diversity indexes. 
``` dansDataSet_distribution <- dansDataSet %>% select(unique.RingID = RingID, Species, Location) %>% group_by(Species, Location) %>% summarise(count = n()) %>% filter(count > 0) %>% na.omit() # spread(data, key, value) # data: A data frame # key: The (unquoted) name of the column whose values will be used as column headings. # value:The (unquoted) names of the column whose values will populate the cells dansDataSet_distribution_matrix <- dansDataSet_distribution %>% spread(Location, count) ``` We can also create a more specific subset, i.e. of species that have at least 100 unique observations in a given location. This will allow for a cleaner figure. ``` dansDataSet_distribution_min100 <- dansDataSet %>% select(unique.RingID = RingID, Species, Location) %>% group_by(Species, Location) %>% summarise(count = n()) %>% filter(count > 100) %>% na.omit() ``` # Using the `vegan` package We will now use the [`vegan`](https://cran.r-project.org/web/packages/vegan/index.html) package to calculate the diversity in the locations. ## Transforming the data to `vegan` requirements ``` dansDataSet_distribution_zero <- dansDataSet_distribution_matrix dansDataSet_distribution_zero[is.na(dansDataSet_distribution_zero)] <- 0 dansDataSet_distribution_zero <- t(dansDataSet_distribution_zero[,2:length(dansDataSet_distribution_zero)]) ``` ## Calculating diversity: **Shannon**, **Simpson** and **Inverted Simpson**. For each of these indexes, we are going to call the corresponding function from vegan, using the default parameters: Shannon or Shannon–Weaver (or Shannon–Wiener) index is defined as: $$H = -\sum_{n=1}^{R} p_i ln_b(p_i) = 1$$ where $p_i$ is the proportional abundance of species $i$ and $b$ is the base of the logarithm. It is most popular to use natural logarithms, but some argue for base $b = 2$. Both variants of Simpson's index are based on $D = \sum_{n=1}^{R}p_i^2$. Choice simpson returns $1-D$ and invsimpson returns $\frac{1}{D}$. 
``` Hshannon <- diversity(dansDataSet_distribution_zero, index = "shannon", MARGIN = 1, base = exp(1)) simp <- diversity(dansDataSet_distribution_zero, "simpson", MARGIN = 1) invsimp <- diversity(dansDataSet_distribution_zero, "inv", MARGIN = 1) ``` ## Calculating species richness The function `rarefy` gives the expected species richness in random subsamples of size sample from the community. The size of sample should be smaller than total community size, but the function will silently work for larger sample as well and return non-rarefied species richness (and standard error equal to 0). If sample is a vector, `rarefaction` is performed for each sample size separately. Rarefaction can be performed only with genuine counts of individuals. The function rarefy is based on Hurlbert's (1971) formulation, and the standard errors on Heck et al. (1975). ``` r.2 <- rarefy(dansDataSet_distribution_zero, 2) ``` ## Calculating `fisher.alpha` This function estimates the $a$ parameter of Fisher's logarithmic series. The estimation is possible only for genuine counts of individuals. The function can optionally return standard errors of $a$. These should be regarded only as rough indicators of the accuracy: the confidence limits of $a$ are strongly non-symmetric and the standard errors cannot be used in Normal inference. ``` alpha <- fisher.alpha(dansDataSet_distribution_zero) ``` ## Richness and Evenness Species **richness** (S) is calculated by `specnumber` which finds the number of species. If MARGIN is set to 2, it finds frequencies of species. **Pielou's evenness** (J) is calculated by $\frac{H_shannon}{log(S)}$. ``` S <- specnumber(dansDataSet_distribution_zero, MARGIN = 1) ## rowSums(BCI > 0) does the same... 
J <- Hshannon/log(S) ``` In order to have all these indeces together, we will put them in a single data frame as follows: ``` metrics <- data.frame( H_Shannon = Hshannon, H_Simp = simp, H_Inv_Simp = invsimp, rarefy = r.2, a = alpha, richness = S, evenness = J ) ``` # Results Finally, let's also create some plots. First of all, let's create a plot based on our first subset, showing for each species and capture dates, the average age of the species captured. ``` png("subset1a1.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_Castricum, aes(x=CatchDate, y=Species, color=count)) + geom_point(aes(size=count)) dev.off() ``` ![_First subset plot: points_](subset1a1.png) ``` png("subset1a2.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_Castricum, aes(x=CatchDate, y=count, colour=Species)) + geom_line() dev.off() ``` ![_First subset plot: lines_](subset1a2.png) We can do the same plots for the single species that we looked into earlier (Northern Lapwing in Castricum, Noord-Holland, NL). ``` png("subset1b1.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_lapwing, aes(x=CatchDate, y=Species, color=count)) + geom_point(aes(size=count)) dev.off() ``` ![_First subset plot: Northern Lapwin points_](subset1b1.png) This is not really easy to interpret. However, we can now have a more interesting plot with the `lines` command, including a smoothing curve to show the overall trend: ``` png("subset1b2.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_lapwing, aes(x=CatchDate, y=count, colour=Species)) + geom_point(aes(x = CatchDate, y = count, colour = Species), size = 3) + stat_smooth(aes(x = CatchDate, y = count), method = "lm", formula = y ~ poly(x, 3), se = FALSE) dev.off() ``` ![_First subset plot: Northern Lapwin lines](subset1b2.png) We can also create a plot based on the second subset. 
In this case, let's see how the distribution of species across the seven locations looks like. ``` lvls <- unique(as.vector(dansDataSet_distribution$Location)) png("subset2a.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_distribution, aes(x=Species, y=Location, color=Species)) + geom_point(aes(size=count)) + theme(text=element_text(family="Arial", size=12*(81/169)), axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.3)) + scale_y_discrete(breaks=lvls[seq(1,length(lvls),by=10)]) #scale_y_discrete(labels = abbreviate) dev.off() ``` ![_Second subset plot_](subset2a.png) This is a very "dense" figure, so let's use the filtered version to see the most highly populated species. ``` png("subset2b.png", width = 4000, height = 2000, res = 300, pointsize = 5) ggplot(data=dansDataSet_distribution_min100, aes(x=Species, y=Location, color=Species)) + geom_point(aes(size=count)) + theme(text=element_text(family="Arial", size=12*(81/169)), axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.3)) dev.off() ``` ![_Second subset plot_](subset2b.png) Finally, let's have a figure showing all 5 indexes together. ``` png("metrics.png", width = 4000, height = 2000, res = 300, pointsize = 5) plot(metrics, pch="+", col="blue") dev.off() ``` ![_Second subset plot_](metrics.png) We could also show the most diverse sites (i.e. richness index over 10). ``` top10_site_metrics <- metrics %>% tibble::rownames_to_column() %>% filter(richness >= 10) %>% arrange(desc(richness)) top10_site_metrics ``` # Conclusions Jupyter notebooks and R is awesome!
github_jupyter
# 基本程序设计 - 一切代码输入,请使用英文输入法 ``` print('hello word') print 'hello' ``` ## 编写一个简单的程序 - 圆公式面积: area = radius \* radius \* 3.1415 ``` radius = 1.0 area = radius * radius * 3.14 # 将后半部分的结果赋值给变量area # 变量一定要有初始值!!! # radius: 变量.area: 变量! # int 类型 print(area) ``` ### 在Python里面不需要定义数据的类型 ## 控制台的读取与输入 - input 输入进去的是字符串 - eval ``` radius = input('请输入半径') # input得到的结果是字符串类型 radius = float(radius) area = radius * radius * 3.14 print('面积为:',area) ``` - 在jupyter用shift + tab 键可以跳出解释文档 ## 变量命名的规范 #等号前面的就是变量名 - 由字母、数字、下划线构成 中间也不能有空格 - 不能以数字开头 \* - 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合) - 可以是任意长度 - 驼峰式命名一个单词的首字母大写 #不要轻易给print赋值,会出错 ## 变量、赋值语句和赋值表达式 - 变量: 通俗理解为可以变化的量 - x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式 - test = test + 1 \* 变量在赋值之前必须有值 #需要一个公式 ## 同时赋值 var1, var2,var3... = exp1,exp2,exp3... ``` #同时赋值的例子 a,b=1,1 c=a+b print(c) ``` ## 定义常量 - 常量:表示一种定值标识符,适合于多次使用的场景。比如PI 在python中可以修改 - 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 ``` a=100 b=100 a=1#这里将a这个常量重新定义 c=a+b ``` ## 数值数据类型和运算符 - 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 <img src = "../Photo/01.jpg"></img> ## 运算符 /、除 //整除、**指数 ## 运算符 %取模 ``` x=6 c=17.521*x**3+15.212*x/27.1 print(c) #如何用eval进行运算 x = eval(input("请输x")) res=eval(input('输入表达式')) print(res) ``` ## EP: - 25/4 多少,如果要将其转变为整数该怎么改写 - 输入一个数字判断是奇数还是偶数 - 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒 - 进阶: 如果今天是星期六,那么10天以后是星期几? 
提示:每个星期的第0天是星期天 ``` a=25 b=4 c=a//b print(c) #如何用eval进行运算 x = eval(input("请输x")) res=eval(input('输入表达式')) print(res) #如何用eval进行运算 miao = eval(input("请输miao")) fen=eval(input('输入表达式')) miao=eval(input('输入表达式')) print(fen('分'),miao('秒')) #如何用eval进行运算 miao = eval(input("请输miao")) fen=eval(input('输入表达式')) miao=eval(input('输入表达式')) print(fen,"分",miao,"秒") x=input() x=int(x) fen=x//60 miao=x%60 print(fen,"分",miao,"秒") #星期的问题 x=input("输入星期几") x=int(x) week=(x+10)%7 print(week) #星期的问题 x=input("输入星期几") x=int(x) week=(x+10)%7 print(week) ``` ## 科学计数法 - 1.234e+2 - 1.234e-2 ``` 1.234e2 ``` ## 计算表达式和运算优先级 <img src = "../Photo/02.png"></img> <img src = "../Photo/03.png"></img> ``` #graph计量图 part_1 = (3+4x)/5 part_2 = part_3 = ``` ## 增强型赋值运算 <img src = "../Photo/04.png"></img> ``` a = a+1 a += 1 ``` ## 类型转换 - float -> int - 四舍五入 round ``` round(1.45,1) round(1.45,2) round(1.46,1) ``` ## EP: - 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数) - 必须使用科学计数法 ``` res = 0.06*197.55 round(res,2) ``` # Project - 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) ![](../Photo/05.png) ``` yuegong = (daikuanshu*yuelilv/(1-(1/(1+yuelilv)^nianxian*12))) ``` # Homework - 1 <img src="../Photo/06.png"></img> ``` celsius = eval(input("输入摄氏温度")) fahrenheit = (9/5) * celsius + 32 print(fahrenheit) ``` - 2 <img src="../Photo/07.png"></img> ``` radius = input('请输入半径') radius = float(radius) area = radius * radius * 3.1415 print('面积为:',area) banjing = eval(input("半径")) gao = eval(input("高")) tiji = banjing*banjing*3.1415*gao print("体积;",tiji) ``` - 3 <img src="../Photo/08.png"></img> ``` yingchi = eval(input("英尺数")) mine = eval("yingchi * 0.0305") print(mine,"米") ``` - 4 <img src="../Photo/10.png"></img> ``` M = eval(input("输入水量")) intitialTemperature = eval(input("初始温度")) finalTemperature= eval(input("最终温度")) Q = eval("M * (finalTemperature - intitialTemperature) * 4184") print(Q) ``` - 5 <img src="../Photo/11.png"></img> ``` chae = eval(input("输入差额")) nianlilv = eval(input("年利率")) lixi 
= eval("chae * (nianlilv / 1200)") print(lixi) ``` - 6 <img src="../Photo/12.png"></img> ``` shusu = eval(input("输入初始速度")) mosu = eval(input("末速度")) shijian = eval(input("时间")) a = (mosu - shusu)/shijian print(a) ``` - 7 进阶 <img src="../Photo/13.png"></img> ``` yiyue = input("美元") yiyue = int(yiyue) yihou = yiyue*(1+0.00417) erhou = (yiyue+yihou)*(1+0.00417) sanhou = (yiyue+erhou)*(1+0.00417) sihou = (yiyue+sanhou)*(1+0.00417) wuhou = (yiyue+sihou)*(1+0.00417) liuhou = (yiyue+wuhou)*(1+0.00417) print(round(liuhou,2)) ``` - 8 进阶 <img src="../Photo/14.png"></img> ``` a,b = eval(input('>>')) print(a,b) print(type(a),type(b)) a = eval(input('>>')) print(a) ```
github_jupyter
A `Series` object is a 1D array, similar to a column in a spreadsheet (with a column name and row labels).
This is called *broadcasting*:
The keys will be used as index labels: ``` weights = {"alice": 68, "bob": 83, "colin": 86, "darwin": 68} s3 = pd.Series(weights) s3 ``` You can control which elements you want to include in the `Series` and in what order by explicitly specifying the desired `index`: ``` s4 = pd.Series(weights, index = ["colin", "alice"]) s4 ``` ## Automatic alignment When an operation involves multiple `Series` objects, `pandas` automatically aligns items by matching index labels. ``` print(s2.keys()) print(s3.keys()) s2 + s3 ``` The resulting `Series` contains the union of index labels from `s2` and `s3`. Since `"colin"` is missing from `s2` and `"charles"` is missing from `s3`, these items have a `NaN` result value. (ie. Not-a-Number means *missing*). Automatic alignment is very handy when working with data that may come from various sources with varying structure and missing items. But if you forget to set the right index labels, you can have surprising results: ``` s5 = pd.Series([1000,1000,1000,1000]) print("s2 =", s2.values) print("s5 =", s5.values) s2 + s5 ``` Pandas could not align the `Series`, since their labels do not match at all, hence the full `NaN` result. ## Init with a scalar You can also initialize a `Series` object using a scalar and a list of index labels: all items will be set to the scalar. ``` meaning = pd.Series(42, ["life", "universe", "everything"]) meaning ``` ## `Series` name A `Series` can have a `name`: ``` s6 = pd.Series([83, 68], index=["bob", "alice"], name="weights") s6 ``` ## Plotting a `Series` Pandas makes it easy to plot `Series` data using matplotlib (for more details on matplotlib, check out the [matplotlib tutorial](tools_matplotlib.ipynb)). Just import matplotlib and call the `plot()` method: ``` %matplotlib inline import matplotlib.pyplot as plt temperatures = [4.4,5.1,6.1,6.2,6.1,6.1,5.7,5.2,4.7,4.1,3.9,3.5] s7 = pd.Series(temperatures, name="Temperature") s7.plot() plt.show() ``` There are *many* options for plotting your data. 
It is not necessary to list them all here: if you need a particular type of plot (histograms, pie charts, etc.), just look for it in the excellent [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) section of pandas' documentation, and look at the example code. # Handling time Many datasets have timestamps, and pandas is awesome at manipulating such data: * it can represent periods (such as 2016Q3) and frequencies (such as "monthly"), * it can convert periods to actual timestamps, and *vice versa*, * it can resample data and aggregate values any way you like, * it can handle timezones. ## Time range Let's start by creating a time series using `pd.date_range()`. This returns a `DatetimeIndex` containing one datetime per hour for 12 hours starting on October 29th 2016 at 5:30pm. ``` dates = pd.date_range('2016/10/29 5:30pm', periods=12, freq='H') dates ``` This `DatetimeIndex` may be used as an index in a `Series`: ``` temp_series = pd.Series(temperatures, dates) temp_series ``` Let's plot this series: ``` temp_series.plot(kind="bar") plt.grid(True) plt.show() ``` ## Resampling Pandas lets us resample a time series very simply. Just call the `resample()` method and specify a new frequency: ``` temp_series_freq_2H = temp_series.resample("2H") temp_series_freq_2H ``` The resampling operation is actually a deferred operation, which is why we did not get a `Series` object, but a `DatetimeIndexResampler` object instead. To actually perform the resampling operation, we can simply call the `mean()` method: Pandas will compute the mean of every pair of consecutive hours: ``` temp_series_freq_2H = temp_series_freq_2H.mean() ``` Let's plot the result: ``` temp_series_freq_2H.plot(kind="bar") plt.show() ``` Note how the values have automatically been aggregated into 2-hour periods. If we look at the 6-8pm period, for example, we had a value of `5.1` at 6:30pm, and `6.1` at 7:30pm. 
After resampling, we just have one value of `5.6`, which is the mean of `5.1` and `6.1`. Rather than computing the mean, we could have used any other aggregation function, for example we can decide to keep the minimum value of each period: ``` temp_series_freq_2H = temp_series.resample("2H").min() temp_series_freq_2H ``` Or, equivalently, we could use the `apply()` method instead: ``` temp_series_freq_2H = temp_series.resample("2H").apply(np.min) temp_series_freq_2H ``` ## Upsampling and interpolation This was an example of downsampling. We can also upsample (ie. increase the frequency), but this creates holes in our data: ``` temp_series_freq_15min = temp_series.resample("15Min").mean() temp_series_freq_15min.head(n=10) # `head` displays the top n values ``` One solution is to fill the gaps by interpolating. We just call the `interpolate()` method. The default is to use linear interpolation, but we can also select another method, such as cubic interpolation: ``` temp_series_freq_15min = temp_series.resample("15Min").interpolate(method="cubic") temp_series_freq_15min.head(n=10) temp_series.plot(label="Period: 1 hour") temp_series_freq_15min.plot(label="Period: 15 minutes") plt.legend() plt.show() ``` ## Timezones By default datetimes are *naive*: they are not aware of timezones, so 2016-10-30 02:30 might mean October 30th 2016 at 2:30am in Paris or in New York. We can make datetimes timezone *aware* by calling the `tz_localize()` method: ``` temp_series_ny = temp_series.tz_localize("America/New_York") temp_series_ny ``` Note that `-04:00` is now appended to all the datetimes. This means that these datetimes refer to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) - 4 hours. 
We can convert these datetimes to Paris time like this: ``` temp_series_paris = temp_series_ny.tz_convert("Europe/Paris") temp_series_paris ``` You may have noticed that the UTC offset changes from `+02:00` to `+01:00`: this is because France switches to winter time at 3am that particular night (time goes back to 2am). Notice that 2:30am occurs twice! Let's go back to a naive representation (if you log some data hourly using local time, without storing the timezone, you might get something like this): ``` temp_series_paris_naive = temp_series_paris.tz_localize(None) temp_series_paris_naive ``` Now `02:30` is really ambiguous. If we try to localize these naive datetimes to the Paris timezone, we get an error: ``` try: temp_series_paris_naive.tz_localize("Europe/Paris") except Exception as e: print(type(e)) print(e) ``` Fortunately using the `ambiguous` argument we can tell pandas to infer the right DST (Daylight Saving Time) based on the order of the ambiguous timestamps: ``` temp_series_paris_naive.tz_localize("Europe/Paris", ambiguous="infer") ``` ## Periods The `pd.period_range()` function returns a `PeriodIndex` instead of a `DatetimeIndex`. For example, let's get all quarters in 2016 and 2017: ``` quarters = pd.period_range('2016Q1', periods=8, freq='Q') quarters ``` Adding a number `N` to a `PeriodIndex` shifts the periods by `N` times the `PeriodIndex`'s frequency: ``` quarters + 3 ``` The `asfreq()` method lets us change the frequency of the `PeriodIndex`. All periods are lengthened or shortened accordingly. For example, let's convert all the quarterly periods to monthly periods (zooming in): ``` quarters.asfreq("M") ``` By default, the `asfreq` zooms on the end of each period. 
We can tell it to zoom on the start of each period instead: ``` quarters.asfreq("M", how="start") ``` And we can zoom out: ``` quarters.asfreq("A") ``` Of course we can create a `Series` with a `PeriodIndex`: ``` quarterly_revenue = pd.Series([300, 320, 290, 390, 320, 360, 310, 410], index = quarters) quarterly_revenue quarterly_revenue.plot(kind="line") plt.show() ``` We can convert periods to timestamps by calling `to_timestamp`. By default this will give us the first day of each period, but by setting `how` and `freq`, we can get the last hour of each period: ``` last_hours = quarterly_revenue.to_timestamp(how="end", freq="H") last_hours ``` And back to periods by calling `to_period`: ``` last_hours.to_period() ``` Pandas also provides many other time-related functions that we recommend you check out in the [documentation](http://pandas.pydata.org/pandas-docs/stable/timeseries.html). To whet your appetite, here is one way to get the last business day of each month in 2016, at 9am: ``` months_2016 = pd.period_range("2016", periods=12, freq="M") one_day_after_last_days = months_2016.asfreq("D") + 1 last_bdays = one_day_after_last_days.to_timestamp() - pd.tseries.offsets.BDay() last_bdays.to_period("H") + 9 ``` # `DataFrame` objects A DataFrame object represents a spreadsheet, with cell values, column names and row index labels. You can define expressions to compute columns based on other columns, create pivot-tables, group rows, draw graphs, etc. You can see `DataFrame`s as dictionaries of `Series`. 
## Creating a `DataFrame` You can create a DataFrame by passing a dictionary of `Series` objects: ``` people_dict = { "weight": pd.Series([68, 83, 112], index=["alice", "bob", "charles"]), "birthyear": pd.Series([1984, 1985, 1992], index=["bob", "alice", "charles"], name="year"), "children": pd.Series([0, 3], index=["charles", "bob"]), "hobby": pd.Series(["Biking", "Dancing"], index=["alice", "bob"]), } people = pd.DataFrame(people_dict) people ``` A few things to note: * the `Series` were automatically aligned based on their index, * missing values are represented as `NaN`, * `Series` names are ignored (the name `"year"` was dropped), * `DataFrame`s are displayed nicely in Jupyter notebooks, woohoo! You can access columns pretty much as you would expect. They are returned as `Series` objects: ``` people["birthyear"] ``` You can also get multiple columns at once: ``` people[["birthyear", "hobby"]] ``` If you pass a list of columns and/or index row labels to the `DataFrame` constructor, it will guarantee that these columns and/or rows will exist, in that order, and no other column/row will exist. 
For example: ``` d2 = pd.DataFrame( people_dict, columns=["birthyear", "weight", "height"], index=["bob", "alice", "eugene"] ) d2 ``` Another convenient way to create a `DataFrame` is to pass all the values to the constructor as an `ndarray`, or a list of lists, and specify the column names and row index labels separately: ``` values = [ [1985, np.nan, "Biking", 68], [1984, 3, "Dancing", 83], [1992, 0, np.nan, 112] ] d3 = pd.DataFrame( values, columns=["birthyear", "children", "hobby", "weight"], index=["alice", "bob", "charles"] ) d3 ``` To specify missing values, you can either use `np.nan` or NumPy's masked arrays: ``` masked_array = np.ma.asarray(values, dtype=np.object) masked_array[(0, 2), (1, 2)] = np.ma.masked d3 = pd.DataFrame( masked_array, columns=["birthyear", "children", "hobby", "weight"], index=["alice", "bob", "charles"] ) d3 ``` Instead of an `ndarray`, you can also pass a `DataFrame` object: ``` d4 = pd.DataFrame( d3, columns=["hobby", "children"], index=["alice", "bob"] ) d4 ``` It is also possible to create a `DataFrame` with a dictionary (or list) of dictionaries (or list): ``` people = pd.DataFrame({ "birthyear": {"alice":1985, "bob": 1984, "charles": 1992}, "hobby": {"alice":"Biking", "bob": "Dancing"}, "weight": {"alice":68, "bob": 83, "charles": 112}, "children": {"bob": 3, "charles": 0} }) people ``` ## Multi-indexing If all columns are tuples of the same size, then they are understood as a multi-index. The same goes for row index labels. 
For example: ``` d5 = pd.DataFrame( { ("public", "birthyear"): {("Paris","alice"):1985, ("Paris","bob"): 1984, ("London","charles"): 1992}, ("public", "hobby"): {("Paris","alice"):"Biking", ("Paris","bob"): "Dancing"}, ("private", "weight"): {("Paris","alice"):68, ("Paris","bob"): 83, ("London","charles"): 112}, ("private", "children"): {("Paris", "alice"):np.nan, ("Paris","bob"): 3, ("London","charles"): 0} } ) d5 ``` You can now get a `DataFrame` containing all the `"public"` columns very simply: ``` d5["public"] d5["public", "hobby"] # Same result as d5["public"]["hobby"] ``` ## Dropping a level Let's look at `d5` again: ``` d5 ``` There are two levels of columns, and two levels of indices. We can drop a column level by calling `droplevel()` (the same goes for indices): ``` d5.columns = d5.columns.droplevel(level = 0) d5 ``` ## Transposing You can swap columns and indices using the `T` attribute: ``` d6 = d5.T d6 ``` ## Stacking and unstacking levels Calling the `stack()` method will push the lowest column level after the lowest index: ``` d7 = d6.stack() d7 ``` Note that many `NaN` values appeared. This makes sense because many new combinations did not exist before (eg. there was no `bob` in `London`). Calling `unstack()` will do the reverse, once again creating many `NaN` values. ``` d8 = d7.unstack() d8 ``` If we call `unstack` again, we end up with a `Series` object: ``` d9 = d8.unstack() d9 ``` The `stack()` and `unstack()` methods let you select the `level` to stack/unstack. You can even stack/unstack multiple levels at once: ``` d10 = d9.unstack(level = (0,1)) d10 ``` ## Most methods return modified copies As you may have noticed, the `stack()` and `unstack()` methods do not modify the object they apply to. Instead, they work on a copy and return that copy. This is true of most methods in pandas. ## Accessing rows Let's go back to the `people` `DataFrame`: ``` people ``` The `loc` attribute lets you access rows instead of columns. 
When you add a new column, it must have the same number of rows.
You may want to just chain the assignment calls, but it does not work because the `people` object is not actually modified by the first assignment:
Let's set `inplace=True` to directly modify the `DataFrame` rather than getting a modified copy: ``` people.eval("body_mass_index = weight / (height/100) ** 2", inplace=True) people ``` You can use a local or global variable in an expression by prefixing it with `'@'`: ``` overweight_threshold = 30 people.eval("overweight = body_mass_index > @overweight_threshold", inplace=True) people ``` ## Querying a `DataFrame` The `query()` method lets you filter a `DataFrame` based on a query expression: ``` people.query("age > 30 and pets == 0") ``` ## Sorting a `DataFrame` You can sort a `DataFrame` by calling its `sort_index` method. By default it sorts the rows by their index label, in ascending order, but let's reverse the order: ``` people.sort_index(ascending=False) ``` Note that `sort_index` returned a sorted *copy* of the `DataFrame`. To modify `people` directly, we can set the `inplace` argument to `True`. Also, we can sort the columns instead of the rows by setting `axis=1`: ``` people.sort_index(axis=1, inplace=True) people ``` To sort the `DataFrame` by the values instead of the labels, we can use `sort_values` and specify the column to sort by: ``` people.sort_values(by="age", inplace=True) people ``` ## Plotting a `DataFrame` Just like for `Series`, pandas makes it easy to draw nice graphs based on a `DataFrame`. For example, it is trivial to create a line plot from a `DataFrame`'s data by calling its `plot` method: ``` people.plot(kind = "line", x = "body_mass_index", y = ["height", "weight"]) plt.show() ``` You can pass extra arguments supported by matplotlib's functions. 
Although `DataFrame`s do not try to mimic NumPy arrays, there are a few similarities.
For example, let's subtract the `mean` of the `DataFrame` (a `Series` object) from the `DataFrame`:
We can replace all `NaN` values with any value using the `fillna()` method:
This is because the `DataFrame`s we are adding do not have the exact same columns (the `grades` `DataFrame` is missing the `"dec"` column), so to make things predictable, pandas orders the final columns alphabetically. To fix this, we can simply add the missing column before adding: ``` grades["dec"] = np.nan final_grades = grades + better_bonus_points final_grades ``` There's not much we can do about December and Colin: it's bad enough that we are making up bonus points, but we can't reasonably make up grades (well I guess some teachers probably do). So let's call the `dropna()` method to get rid of rows that are full of `NaN`s: ``` final_grades_clean = final_grades.dropna(how="all") final_grades_clean ``` Now let's remove columns that are full of `NaN`s by setting the `axis` argument to `1`: ``` final_grades_clean = final_grades_clean.dropna(axis=1, how="all") final_grades_clean ``` ## Aggregating with `groupby` Similar to the SQL language, pandas allows grouping your data into groups to run calculations over each group. First, let's add some extra data about each person so we can group them, and let's go back to the `final_grades` `DataFrame` so we can see how `NaN` values are handled: ``` final_grades["hobby"] = ["Biking", "Dancing", np.nan, "Dancing", "Biking"] final_grades ``` Now let's group data in this `DataFrame` by hobby: ``` grouped_grades = final_grades.groupby("hobby") grouped_grades ``` We are ready to compute the average grade per hobby: ``` grouped_grades.mean() ``` That was easy! Note that the `NaN` values have simply been skipped when computing the means. ## Pivot tables Pandas supports spreadsheet-like [pivot tables](https://en.wikipedia.org/wiki/Pivot_table) that allow quick data summarization. 
To illustrate this, let's create a simple `DataFrame`: ``` bonus_points more_grades = final_grades_clean.stack().reset_index() more_grades.columns = ["name", "month", "grade"] more_grades["bonus"] = [np.nan, np.nan, np.nan, 0, np.nan, 2, 3, 3, 0, 0, 1, 0] more_grades ``` Now we can call the `pd.pivot_table()` function for this `DataFrame`, asking to group by the `name` column. By default, `pivot_table()` computes the mean of each numeric column: ``` pd.pivot_table(more_grades, index="name") ``` We can change the aggregation function by setting the `aggfunc` argument, and we can also specify the list of columns whose values will be aggregated: ``` pd.pivot_table(more_grades, index="name", values=["grade","bonus"], aggfunc=np.max) ``` We can also specify the `columns` to aggregate over horizontally, and request the grand totals for each row and column by setting `margins=True`: ``` pd.pivot_table(more_grades, index="name", values="grade", columns="month", margins=True) ``` Finally, we can specify multiple index or column names, and pandas will create multi-level indices: ``` pd.pivot_table(more_grades, index=("name", "month"), margins=True) ``` ## Overview functions When dealing with large `DataFrames`, it is useful to get a quick overview of its content. Pandas offers a few functions for this. First, let's create a large `DataFrame` with a mix of numeric values, missing values and text values. Notice how Jupyter displays only the corners of the `DataFrame`: ``` much_data = np.fromfunction(lambda x,y: (x+y*y)%17*11, (10000, 26)) large_df = pd.DataFrame(much_data, columns=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")) large_df[large_df % 16 == 0] = np.nan large_df.insert(3,"some_text", "Blabla") large_df ``` The `head()` method returns the top 5 rows: ``` large_df.head() ``` Of course there's also a `tail()` function to view the bottom 5 rows. 
The `info()` method prints out a summary of each column's contents:
We can also read data straight from the Internet. For example, let's load all U.S. cities from [simplemaps.com](http://simplemaps.com/): ``` us_cities = None try: csv_url = "http://simplemaps.com/files/cities.csv" us_cities = pd.read_csv(csv_url, index_col=0) us_cities = us_cities.head() except IOError as e: print(e) us_cities ``` There are more options available, in particular regarding datetime format. Check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/io.html) for more details. # Combining `DataFrame`s ## SQL-like joins One powerful feature of pandas is it's ability to perform SQL-like joins on `DataFrame`s. Various types of joins are supported: inner joins, left/right outer joins and full joins. To illustrate this, let's start by creating a couple simple `DataFrame`s: ``` city_loc = pd.DataFrame( [ ["CA", "San Francisco", 37.781334, -122.416728], ["NY", "New York", 40.705649, -74.008344], ["FL", "Miami", 25.791100, -80.320733], ["OH", "Cleveland", 41.473508, -81.739791], ["UT", "Salt Lake City", 40.755851, -111.896657] ], columns=["state", "city", "lat", "lng"]) city_loc city_pop = pd.DataFrame( [ [808976, "San Francisco", "California"], [8363710, "New York", "New-York"], [413201, "Miami", "Florida"], [2242193, "Houston", "Texas"] ], index=[3,4,5,6], columns=["population", "city", "state"]) city_pop ``` Now let's join these `DataFrame`s using the `merge()` function: ``` pd.merge(left=city_loc, right=city_pop, on="city") ``` Note that both `DataFrame`s have a column named `state`, so in the result they got renamed to `state_x` and `state_y`. Also, note that Cleveland, Salt Lake City and Houston were dropped because they don't exist in *both* `DataFrame`s. This is the equivalent of a SQL `INNER JOIN`. 
If you want a `FULL OUTER JOIN`, where no city gets dropped and `NaN` values are added, you must specify `how="outer"`: ``` all_cities = pd.merge(left=city_loc, right=city_pop, on="city", how="outer") all_cities ``` Of course `LEFT OUTER JOIN` is also available by setting `how="left"`: only the cities present in the left `DataFrame` end up in the result. Similarly, with `how="right"` only cities in the right `DataFrame` appear in the result. For example: ``` pd.merge(left=city_loc, right=city_pop, on="city", how="right") ``` If the key to join on is actually in one (or both) `DataFrame`'s index, you must use `left_index=True` and/or `right_index=True`. If the key column names differ, you must use `left_on` and `right_on`. For example: ``` city_pop2 = city_pop.copy() city_pop2.columns = ["population", "name", "state"] pd.merge(left=city_loc, right=city_pop2, left_on="city", right_on="name") ``` ## Concatenation Rather than joining `DataFrame`s, we may just want to concatenate them. That's what `concat()` is for: ``` result_concat = pd.concat([city_loc, city_pop]) result_concat ``` Note that this operation aligned the data horizontally (by columns) but not vertically (by rows). In this example, we end up with multiple rows having the same index (eg. 3). Pandas handles this rather gracefully: ``` result_concat.loc[3] ``` Or you can tell pandas to just ignore the index: ``` pd.concat([city_loc, city_pop], ignore_index=True) ``` Notice that when a column does not exist in a `DataFrame`, it acts as if it was filled with `NaN` values. If we set `join="inner"`, then only columns that exist in *both* `DataFrame`s are returned: ``` pd.concat([city_loc, city_pop], join="inner") ``` You can concatenate `DataFrame`s horizontally instead of vertically by setting `axis=1`: ``` pd.concat([city_loc, city_pop], axis=1) ``` In this case it really does not make much sense because the indices do not align well (eg. 
Cleveland and San Francisco end up on the same row, because they shared the index label `3`). So let's reindex the `DataFrame`s by city name before concatenating: ``` pd.concat([city_loc.set_index("city"), city_pop.set_index("city")], axis=1) ``` This looks a lot like a `FULL OUTER JOIN`, except that the `state` columns were not renamed to `state_x` and `state_y`, and the `city` column is now the index. The `append()` method is a useful shorthand for concatenating `DataFrame`s vertically: ``` city_loc.append(city_pop) ``` As always in pandas, the `append()` method does *not* actually modify `city_loc`: it works on a copy and returns the modified copy. # Categories It is quite frequent to have values that represent categories, for example `1` for female and `2` for male, or `"A"` for Good, `"B"` for Average, `"C"` for Bad. These categorical values can be hard to read and cumbersome to handle, but fortunately pandas makes it easy. To illustrate this, let's take the `city_pop` `DataFrame` we created earlier, and add a column that represents a category: ``` city_eco = city_pop.copy() city_eco["eco_code"] = [17, 17, 34, 20] city_eco ``` Right now the `eco_code` column is full of apparently meaningless codes. Let's fix that. First, we will create a new categorical column based on the `eco_code`s: ``` city_eco["economy"] = city_eco["eco_code"].astype('category') city_eco["economy"].cat.categories ``` Now we can give each category a meaningful name: ``` city_eco["economy"].cat.categories = ["Finance", "Energy", "Tourism"] city_eco ``` Note that categorical values are sorted according to their categorical order, *not* their alphabetical order: ``` city_eco.sort_values(by="economy", ascending=False) ``` # What next? As you probably noticed by now, pandas is quite a large library with *many* features. Although we went through the most important features, there is still a lot to discover. Probably the best way to learn more is to get your hands dirty with some real-life data. 
It is also a good idea to go through pandas' excellent [documentation](http://pandas.pydata.org/pandas-docs/stable/index.html), in particular the [Cookbook](http://pandas.pydata.org/pandas-docs/stable/cookbook.html).
github_jupyter
MASSACHUSETTS INSTITUTE OF TECHNOLOGY<br> SYSTEM DESIGN & MANAGEMENT<br> Author: R. Chadwick Holmes<br> Date: December 5, 2021<br><br> Script Purpose:<br> Script reads in well data set, expands by 4 or 8 using neighboring locations within some distance threshold (nominally, .01 degrees), and saves new "expanded" datasets out for use in modeling. <br><br> ``` !apt update !apt upgrade !apt install gdal-bin python-gdal python3-gdal # Install rtree - Geopandas requirment !apt install python3-rtree # Install Geopandas !pip install git+git://github.com/geopandas/geopandas.git # Install descartes - Geopandas requirment !pip install descartes cartopy !pip uninstall -y shapely !pip install shapely --no-binary shapely !pip install dataprep --no-binary dataprep ``` ### Mount Google Drive ``` from google.colab import drive drive.mount('/content/drive') path = r'/content/drive/MyDrive/Colab Notebooks/Data' ``` ### Load Key Libraries ``` import numpy as np import pandas as pd # provides interface for interacting with tabular data import geopandas as gpd # combines the capabilities of pandas and shapely for geospatial operations from shapely.geometry import Point, Polygon, MultiPolygon # for manipulating text data into geospatial shapes from shapely import wkt # stands for "well known text," allows for interchange across GIS programs import rtree # supports geospatial join from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.preprocessing import StandardScaler, PowerTransformer from pathlib import Path import pickle as pkl import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.colors import ListedColormap import seaborn as sns #from dataprep.eda import plot, plot_correlation, plot_missing, create_report ``` ### Load data into geopandas dataframe ``` datafolder = Path(path) wfile = r'SMU_wells_clipped2extent.csv' welldf = pd.read_csv(datafolder / wfile) welldf.shape ``` ### Generate new well 
file with additional points (N, S, E, W) ``` # drop any rows missing lat, lon and geothermal gradient values welldf = welldf.dropna(subset=['Latitude','Longitude','GeothermGrad']) welldf.shape # sort wells by gradient, drop duplicates and keep highest gradient observed welldf = welldf.sort_values(by=['Latitude','Longitude','GeothermGrad'],ascending=False).drop_duplicates(subset=['Latitude','Longitude'],keep='first') welldf.shape list(welldf) welldf['Latitude0'] = welldf['Latitude'] welldf['Longitude0'] = welldf['Longitude'] def addnewrow(df,rawrow,pluslat,pluslong): rawrow.Latitude += pluslat rawrow.Longitude += pluslong return df.append(rawrow) outdf = welldf.copy(deep=True).reset_index(drop=True) ind = outdf.index.copy(deep=True) print(outdf.shape) #### write out for use with ArcGIS print('saving original data...') outdf.to_csv(datafolder / 'well_locs_orig.csv') outdf = welldf.copy(deep=True).reset_index(drop=True) ind = outdf.index.copy(deep=True) print(outdf.shape) for i in ind: rawrow = outdf.loc[i,:].copy(deep=True) outdf = addnewrow(outdf,rawrow.copy(deep=True),0.01,0.00); outdf = addnewrow(outdf,rawrow.copy(deep=True),-0.01,0.00); outdf = addnewrow(outdf,rawrow.copy(deep=True),0.00,0.01); outdf = addnewrow(outdf,rawrow.copy(deep=True),0.00,-0.01); #### write out for use with ArcGIS print(outdf.shape) print('saving data with 4 new pseudowells...') outdf.to_csv(datafolder / 'well_locs_plus4.csv') outdf = welldf.copy(deep=True).reset_index(drop=True) ind = outdf.index.copy(deep=True) print(outdf.shape) for i in ind: rawrow = outdf.loc[i,:].copy(deep=True) outdf = addnewrow(outdf,rawrow.copy(deep=True),0.01,0.00); outdf = addnewrow(outdf,rawrow.copy(deep=True),-0.01,0.00); outdf = addnewrow(outdf,rawrow.copy(deep=True),0.00,0.01); outdf = addnewrow(outdf,rawrow.copy(deep=True),0.00,-0.01); outdf = addnewrow(outdf,rawrow.copy(deep=True),0.01,0.01); outdf = addnewrow(outdf,rawrow.copy(deep=True),-0.01,0.01); outdf = 
addnewrow(outdf,rawrow.copy(deep=True),0.01,-0.01); outdf = addnewrow(outdf,rawrow.copy(deep=True),-0.01,-0.01); #### write out for use with ArcGIS print(outdf.shape) print('saving data with 8 new pseudowells...') outdf.to_csv(datafolder / 'well_locs_plus8.csv') ```
github_jupyter