text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import dgl import torch graphs, _ = dgl.load_graphs('/afs/inf.ed.ac.uk/user/s20/s2041332/mlp_project/graph/dev_graphs_200.dgl') print(len(graphs)) a = torch.tensor([[[0, 1], [1, 2], [2, 3]], [[3,4],[4,5],[5,6]]],dtype=torch.float) print(a.shape) print(a.mean(0).shape) print(a) torch.stack([a.mean(0)[0], a[0][1], a[-1][2]]) from RGCNLayer import * import numpy as np import dgl in_feat = 4 out_feat = 3 num_rels = 2 # Test data graphs = [] for _ in range(2): g = dgl.DGLGraph((np.random.randint(0, 3, (2,)), np.random.randint(0, 3, (2,)))) g.ndata['h'] = torch.randn(g.number_of_nodes(), in_feat) g.edata['rel_type'] = torch.randint(0, num_rels, (2,)) g.edata['norm'] = torch.randn(2) graphs.append(g) num_rels = 2 max_nodes = 3 def dgl2matrix(g): edge_pair = g.edges() us,vs = edge_pair norm_adj = torch.zeros((num_rels, max_nodes, max_nodes)) for i,(u,v) in enumerate(zip(us, vs)): norm_adj[g.edata['rel_type'][i]][u][v] = g.edata['e_weight'][i] return norm_adj bg = [] # Create two graphs with max nodes size g1 = dgl.DGLGraph() for n in range(max_nodes): g1.add_nodes(1, data={'n_embed':torch.tensor([[n, n, n, n]],dtype=torch.float)}) norm1 = torch.tensor([0.1, 0.1, 0.1, 0.1], dtype=torch.float) rel1 = torch.tensor([0, 1, 0, 1], dtype=torch.int) # g1: 0-1, 0-2 g1.add_edges([0, 0, 1, 2], [1, 2, 0, 0], data={'e_weight': norm1, 'rel_type': rel1}) adj1 = dgl.khop_adj(g1, 1) bg.append(g1) # g2: 1-2 g2 = dgl.DGLGraph() for n in range(max_nodes): g2.add_nodes(1, data={'n_embed':torch.tensor([[n, n, n, n]],dtype=torch.float)}) norm2 = torch.tensor([0.2, 0.2], dtype=torch.float) rel2 = torch.tensor([0, 0], dtype=torch.int) g2.add_edges([1, 2], [2, 1], data={'e_weight': norm2, 'rel_type': rel2}) adj2 = dgl.khop_adj(g2, 1) bg.append(g2) # print(g1.edata['e_weight']) b_h = [] b_norm_adj = [] for g in bg: norm_adj = dgl2matrix(g) h = g.ndata['n_embed'] b_h.append(h) b_norm_adj.append(norm_adj) batch_size = 2 layer = MyRGCNLayer(in_feat, out_feat, num_rels) print(b_h) # batch the input 
b_h = torch.stack(b_h)                # [batch_size, num_nodes, in_feat]
b_norm_adj = torch.stack(b_norm_adj)  # [batch_size, num_rels, num_nodes, num_nodes]
out = layer.forward(b_h, b_norm_adj)
out.shape

import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
import dgl.function as fn
from functools import partial
from RGCNLayer import *


class RGCNLayer(nn.Module):
    """One Relational Graph Convolution layer (Schlichtkrull et al., 2018).

    Uses the basis-decomposition trick of equation (3) of the R-GCN paper:
    per-relation weights are linear combinations of ``num_bases`` shared bases.

    Args:
        in_feat: input feature dimension per node.
        out_feat: output feature dimension per node.
        num_rels: number of edge relation types.
        num_bases: number of weight bases; <= 0 or > num_rels means one basis
            per relation (no decomposition).
        bias: truthy to add a learnable per-output-feature bias.
        activation: optional callable applied to the aggregated features.
        is_input_layer: if True, node input is an integer id ('id' field) and
            the matmul is performed as an embedding lookup.
    """

    def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
                 activation=None, is_input_layer=False):
        super(RGCNLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.num_rels = num_rels
        self.num_bases = num_bases
        self.activation = activation
        self.is_input_layer = is_input_layer
        # sanity check: fall back to one basis per relation
        if self.num_bases <= 0 or self.num_bases > self.num_rels:
            self.num_bases = self.num_rels
        # weight bases in equation (3)
        self.weight = nn.Parameter(torch.Tensor(self.num_bases,
                                                self.in_feat,
                                                self.out_feat))
        if self.num_bases < self.num_rels:
            # linear combination coefficients in equation (3)
            self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))
        # BUGFIX: the original stored the flag and the Parameter in the same
        # attribute and later evaluated `if self.bias:` on a multi-element
        # tensor (RuntimeError), and it applied xavier_uniform_ to a 1-D
        # tensor, which raises (fan-in/fan-out undefined for < 2 dims).
        # Keep `self.bias` as a zero-initialised Parameter, or None.
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_feat))
        else:
            self.bias = None
        # init trainable parameters
        nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
        if self.num_bases < self.num_rels:
            nn.init.xavier_uniform_(self.w_comp, gain=nn.init.calculate_gain('relu'))

    def forward(self, g):
        """Run one message-passing round in place; results land in g.ndata['h']."""
        if self.num_bases < self.num_rels:
            # generate all relation weights from the bases (equation (3))
            weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)
            weight = torch.matmul(self.w_comp, weight).view(self.num_rels,
                                                            self.in_feat,
                                                            self.out_feat)
        else:
            weight = self.weight

        if self.is_input_layer:
            def message_func(edges):
                # For the input layer the source feature is a one-hot node id,
                # so the matrix multiply reduces to an embedding lookup.
                embed = weight.view(-1, self.out_feat)
                index = edges.data['rel_type'] * self.in_feat + edges.src['id']
                return {'msg': embed[index] * edges.data['norm']}
        else:
            def message_func(edges):
                w = weight[edges.data['rel_type']]
                # BUGFIX: squeeze(1), not squeeze() -- a bare squeeze() would
                # also drop the edge dimension when a graph has a single edge.
                msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze(1)
                msg = msg * edges.data['norm']
                return {'msg': msg}

        def apply_func(nodes):
            h = nodes.data['h']
            if self.bias is not None:
                h = h + self.bias
            if self.activation is not None:
                h = self.activation(h)
            return {'h': h}

        g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)


class Model(nn.Module):
    """Full R-GCN entity classifier: input layer, hidden layers, softmax output.

    Node "features" are simply integer ids (`create_features`), consumed by the
    embedding-lookup input layer.
    """

    def __init__(self, num_nodes, h_dim, out_dim, num_rels,
                 num_bases=-1, num_hidden_layers=1):
        super(Model, self).__init__()
        self.num_nodes = num_nodes
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.num_rels = num_rels
        self.num_bases = num_bases
        self.num_hidden_layers = num_hidden_layers
        # create rgcn layers
        self.build_model()
        # create initial features
        self.features = self.create_features()

    def build_model(self):
        """Assemble input -> hidden* -> output layer stack."""
        self.layers = nn.ModuleList()
        # input to hidden
        self.layers.append(self.build_input_layer())
        # hidden to hidden
        for _ in range(self.num_hidden_layers):
            self.layers.append(self.build_hidden_layer())
        # hidden to output
        self.layers.append(self.build_output_layer())

    def create_features(self):
        """Initial per-node feature: the node's own integer id."""
        return torch.arange(self.num_nodes)

    def build_input_layer(self):
        return RGCNLayer(self.num_nodes, self.h_dim, self.num_rels,
                         self.num_bases, activation=F.relu, is_input_layer=True)

    def build_hidden_layer(self):
        return RGCNLayer(self.h_dim, self.h_dim, self.num_rels,
                         self.num_bases, activation=F.relu)

    def build_output_layer(self):
        return RGCNLayer(self.h_dim, self.out_dim, self.num_rels,
                         self.num_bases, activation=partial(F.softmax, dim=1))

    def forward(self, g):
        """Return per-node class scores of shape (num_nodes, out_dim)."""
        if self.features is not None:
            g.ndata['id'] = self.features
        for layer in self.layers:
            layer(g)
        return g.ndata.pop('h')


# load
graph data from dgl.contrib.data import load_data import numpy as np data = load_data(dataset='aifb') num_nodes = data.num_nodes num_rels = data.num_rels num_classes = data.num_classes labels = data.labels train_idx = data.train_idx # split training and validation set val_idx = train_idx[:len(train_idx) // 5] train_idx = train_idx[len(train_idx) // 5:] # edge type and normalization factor edge_type = torch.from_numpy(data.edge_type) edge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1) labels = torch.from_numpy(labels).view(-1) # configurations n_hidden = 16 # number of hidden units n_bases = -1 # use number of relations as number of bases n_hidden_layers = 0 # use 1 input layer, 1 output layer, no hidden layer n_epochs = 25 # epochs to train lr = 0.01 # learning rate l2norm = 0 # L2 norm coefficient # create graph g = DGLGraph() g.add_nodes(num_nodes) g.add_edges(data.edge_src, data.edge_dst) g.edata.update({'rel_type': edge_type, 'norm': edge_norm}) # create model model = Model(len(g), n_hidden, num_classes, num_rels, num_bases=n_bases, num_hidden_layers=n_hidden_layers) # optimizer optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2norm) print("start training...") model.train() for epoch in range(n_epochs): optimizer.zero_grad() logits = model.forward(g) loss = F.cross_entropy(logits[train_idx], labels[train_idx]) loss.backward() optimizer.step() train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx]) train_acc = train_acc.item() / len(train_idx) val_loss = F.cross_entropy(logits[val_idx], labels[val_idx]) val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]) val_acc = val_acc.item() / len(val_idx) print("Epoch {:05d} | ".format(epoch) + "Train Accuracy: {:.4f} | Train Loss: {:.4f} | ".format( train_acc, loss.item()) + "Validation Accuracy: {:.4f} | Validation loss: {:.4f}".format( val_acc, val_loss.item())) ```
github_jupyter
# MAT281 - Laboratorio 7 ## Aplicaciones de la Matemática en la Ingeniería ## __Instrucciones__ * Completa tus datos personales (nombre y rol USM). * Debes enviar este .ipynb con el siguiente formato de nombre: 07_formato_datos_NOMBRE_APELLIDO.ipynb con tus respuestas a alonso.ogueda@gmail.com y sebastian.flores@usm.cl . * Se evaluará: - Soluciones - Código - Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error. - La escala es de 0 a 4 considerando solo valores enteros. * __La entrega es al final de esta clase.__ __Nombre__: __Rol__: ## ¿Qué contenido aprenderemos? Estimar el error al aplicar uno o varios modelos a los datos, utilizando la técnica de **Holdout Set**. ## Motivación Resulta imposible conocer a priori qué modelo explicará de mejor manera un cierto conjunto de datos. Las técnicas de Holdout Set (y Cross Validation) permiten estimar el error que posee el modelo. ## Problema: Abalone Dataset Los datos Abalone Dataset corresponden a medidas físicas de abulones u orejas marinas (abalones), una especie de lapa o caracol marino comestible. Este set de datos fue descrito por Sam Waugh para su tesis de doctorado, en la cual utilizó los datos para ilustrar el comportamiento de algoritmos de clasificación. Desde entonces, se ha utilizado para verificar algoritmos de clasificación y regresión. <img src="images/abalone.jpg" alt="" width="600px" align="middle"/> La base de datos contiene mediciones a 4177 abalones, donde las mediciones posibles son sexo ($S$), peso entero $W_1$, peso sin concha $W_2$, peso de vísceras $W_3$, peso de concha $W_4$, largo ($L$), diámetro $D$, altura $H$, y el número de anillos $N$. Buscaremos **predecir el número de anillos**, utilizando las otras variables. 
## Modelos propuestos Los modelos propuestos son los siguientes: #### Modelo A Consideramos 9 parámetros, llamados $\alpha_i$, para el siguiente modelo: $$ \log(A) = \alpha_0 + \alpha_1 W_1 + \alpha_2 W_2 +\alpha_3 W_3 +\alpha_4 W_4 + \alpha_5 S + \alpha_6 \log L + \alpha_7 \log D+ \alpha_8 \log H$$ #### Modelo B Consideramos 6 parámetros, llamados $\beta_i$, para el siguiente modelo: $$ \log(A) = \beta_0 + \beta_1 W_1 + \beta_2 W_2 +\beta_3 W_3 +\beta W_4 + \beta_5 \log( L D H ) $$ #### Modelo C Consideramos 12 parámetros, llamados $\theta_i^{k}$, con $k \in \{M, F, I\}$, para el siguiente modelo: Si $S=male$: $$ \log(A) = \theta_0^M + \theta_1^M W_2 + \theta_2^M W_4 + \theta_3^M \log( L D H ) $$ Si $S=female$ $$ \log(A) = \theta_0^F + \theta_1^F W_2 + \theta_2^F W_4 + \theta_3^F \log( L D H ) $$ Si $S=indefined$ $$ \log(A) = \theta_0^I + \theta_1^I W_2 + \theta_2^I W_4 + \theta_3^I \log( L D H ) $$ ***¿Cómo podríamos de manera científica saber cuál de los modelos planteados representa de mejor manera al conjunto de datos?*** ## 1. Descargando los datos Utilizaremos los datos del dataset que se pueden descargar desde el siguiente link: http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data ## 2. Leyendo los datos A continuación se le provee cierto código para que lea los datos desde el archivo data/abalone.data.txt y los cargue en un arreglo en numpy. 
``` %%bash head data/abalone.data.txt import numpy as np # Cargando los datos data = [] fh = open("data/abalone.data.txt","r") # Estructura de datos: # 'sex','length','diameter','height','weight.whole','weight.shucked','weight.viscera','weight.shell','rings' # Ejemplo de linea conversion_sexo = {"M":+1, "I":0, "F":-1} # Para Male, Incognito y Female for line in fh: abalone = line.split(",") abalone[0] = conversion_sexo[abalone[0]] data.append([float(x) for x in abalone]) fh.close() # Convertir lista a array data = np.array(data) # Limpiando datos erroneos (todos los valores excepto sexo deben ser estricamente positivos) mask = np.all(data[:,1:]>0, axis=1) abalone_data = data[mask] # Imprimir datos (opcional) #print data[:10] ``` ## Desafío 1 **1.** ¿Que significado tiene la siguiente expresión en el código para cargar los datos? ¿Porqué se realiza? abalone[0] = conversion_sexo[abalone[0]] *Respuesta*: ... **2.** ¿Resulta relevante que la codificación del sexo del abalone sea '"M":+1, "I":0, "F":-1' y no, por ejemplo otros valores como, '"M":0, "I":5, "F":45'? *Respuesta*: ... ## 3. Exploración visual de los datos A continuación se le provee código para generar visualizaciones de los datos. Ejecute secuencialmente e interprete los gráficos. 
``` from matplotlib import pyplot as plt def plot(data, i, j): label = ['Sexo', 'Largo', 'Diametro', 'Altura', 'Peso Entero', 'Peso Desconchado', 'Peso Viscera', 'Peso Concha', 'Numero Anillos'] M_mask = data[:,0] == +1 I_mask = data[:,0] == 0 F_mask = data[:,0] == -1 plt.figure(figsize=(16,8)) plt.plot(data[:,i][M_mask], data[:,j][M_mask], "og", label="M") plt.plot(data[:,i][F_mask], data[:,j][F_mask], "sr", label="F") plt.plot(data[:,i][I_mask], data[:,j][I_mask], "<b", label="I") plt.xlabel(label[i]) plt.ylabel(label[j]) plt.legend() plt.show() """ Sandbox: Cambie los parámetros para obtener distintos gráficos Indices: 0:'Sexo', 1:'Largo', 2:'Diametro', 3:'Altura', 4:'Peso Entero', 5:'Peso Desconchado', 6:'Peso Viscera', 7:'Peso Concha', 8:'Numero Anillos' """ plot(abalone_data, 1, 8) ``` ## Desafío 2 **1.** ¿Cómo se reparten los estados M, F e I? ¿Podemos prescindir de los datos con estado "I"? *Respuesta*: ## 4. Holdout Set La técnica del holdout set separa los datos en 2 grupos. * El primero de los grupos permitirá entrenar el modelo y obtener los parámetros del modelo. * El segundo de los grupos se utilizará obtener una estimación del error (predictivo) del modelo. <img src="images/HoldoutSet.png" alt="" width="600px" align="middle"/> ## Características * Típicamente se separa el 30 % de los datos para el testeo del modelo. * Esto significa que el modelo se entrena en un conjunto menor de datos, y por tanto el modelo no será tan bueno como si se entrenara en el conjunto completo de datos. * El modelo se entrena en el conjunto de Training, sin utilizar ningun ejemplo del conjunto de datos Test. * El evaluar el modelo entrenado en los datos del conjunto de Test, el modelo se está aplicando por primera vez en esos datos, y por tanto, la estimación del error no continene bias. ## Importante * El procedimiento estándar es reportar el error entregado en el set de predicción, pero luego volver a entregar el modelo utilizando todos los datos simultáneamente. 
La partición Training-Test se realiza para tener una estimación del error. * El error reportado es una estimación conservativa, puesto que al entrenar en todos los datos el error del modelo actual típicamente disminuirá... ¡pero ya no tenemos datos para hacer una mejor estimación! * Predicciones conservativas son mejores que predicciones optimistas. ## Ejemplo Ilustraremos el funcionamiento del método con datos sintéticos: $$ y(x) = 5 \cos \Big( \frac{\pi}{4} x \Big) + \mathcal{N}\Big(0,1\Big)$$ Buscaremos ajustar un modelo del tipo $$ y(x) = a \cos \Big( b x + c\Big) + d$$ minimizando el error cuadrático. El error predictivo del modelo será calculado utilizando RMSE (Root Mean Square Error): $$ E(o,p) = \sqrt{ \frac{1}{N}\sum_{i=1}^N (o_i - p_i)^2 }$$ El RMSE corresponde a la desviación estándar de los residuos. ## Implementación Numérica de holdout set La implementación numérica del holdout set no depende del modelo a ajustar: puede realizarse para regresion lineal o logística, modelos discretos, y cualquier otro algoritmos de machine learning, etc. Los pasos son: 1. Realizar la partición en datos de entrenamiento y de predicción. 2. Obtener los coeficientes del modelo utilizando los datos de entrenamiento. 3. Utilizar el modelo entrenado para realizar predicción de datos en el set de predicción. 4. Comparar la predicción realizada con los datos reales (conocidos en el set de predicción), para estimar el error de predicción. 5. Obtener los coeficientes del modelo todos los datos de entrenamiento. En general la fórmula del error a calcular viene dado de manera natural por el tipo de problema y modelo utilizado. No siempre es el mismo. 
``` import model # Local model.py import numpy as np import os # Load data filenames = ["dataN50.txt", "dataN500.txt", "dataN5000.txt"] data = model.load_data(os.path.join("data",filenames[0])) # use 0, 1 or 2 N = data.shape[0] split = int(0.7*N) # Change here # Permute the data np.random.seed(23) # Change here data = np.random.permutation(data) # Do the split training_data = data[:split,:] testing_data = data[split:,:] # Train model excluding the holdout set training_params = model.get_params(training_data) # Test with the holdout set prediction_error = model.get_error(training_params, testing_data) print("Prediction error estimated on {:.5} ".format(prediction_error)) # Train model with all the data all_data_params = model.get_params(data) # Report model.full_report(training_data, testing_data, training_params, all_data_params) # Plot the model model.plot(training_data, testing_data, training_params, all_data_params) ``` ## Desafío 2: ¿Que puede decir respecto al comportamiento del error al usar holdout set con los archivos con 50, 500 y 5000 elementos? **R:** ... ## 5. Entrenando y Testeando los Modelos A continuación se entrega el código necesario para entrenar los distintos modelos y realizar una predicción. Se proveen ejemplos de utilización. 
#### Modelo A $$ \log(A) = \alpha_0 + \alpha_1 W_1 + \alpha_2 W_2 +\alpha_3 W_3 +\alpha_4 W_4 + \alpha_5 S + \alpha_6 \log L + \alpha_7 \log D+ \alpha_8 \log H$$ ``` # 'sex','length','diameter','height','weight.whole','weight.shucked','weight.viscera','weight.shell','rings' # Entrenando el modelo A def train_model_A(data): y = np.log(data[:,-1]) X = data.copy() X[:,0] = 1.0 X[:,1:4] = np.log(X[:,1:4]) coeffs = np.linalg.lstsq(X, y)[0] return coeffs # Testeando el modelo A def test_model_A(data, coeffs): X = data.copy() X[:,0] = 1.0 X[:,1:4] = np.log(X[:,1:4]) ln_anillos = np.dot(X, coeffs) return np.exp(ln_anillos) # Obtener valores y prediccion coeffs_A = train_model_A(abalone_data) y_pred = test_model_A(abalone_data, coeffs_A) # Mostrar graficamente y_data = abalone_data[:,-1] plt.figure(figsize=(10,10)) plt.title("") plt.plot(y_data, y_pred, "x") plt.plot(y_data, y_data, "k-") plt.xlabel("Número de anillos real") plt.ylabel("Número de anillos predicho") plt.show() ``` #### Modelo B $$ \log(A) = \beta_0 + \beta_1 W_1 + \beta_2 W_2 +\beta_3 W_3 +\beta W_4 + \beta_5 \log( L D H ) $$ ``` # 'sex','length','diameter','height','weight.whole','weight.shucked','weight.viscera','weight.shell','rings' # Entrenando el modelo B def train_model_B(data): y = np.log(data[:,-1]) X = np.ones([data.shape[0],6]) X[:,0] = 1.0 X[:,1:5] = data[:,4:8] X[:,5] = np.log(data[:,1]*data[:,2]*data[:,3]) coeffs = np.linalg.lstsq(X, y)[0] return coeffs # Testeando el modelo B def test_model_B(data, coeffs): X = np.ones([data.shape[0],6]) X[:,0] = 1.0 X[:,1:5] = data[:,4:8] X[:,5] = np.log(data[:,1]*data[:,2]*data[:,3]) ln_anillos = np.dot(X, coeffs) return np.round(np.exp(ln_anillos)) # Obtener valores y prediccion coeffs_B = train_model_B(abalone_data) y_pred = test_model_B(abalone_data, coeffs_B) # Mostrar graficamente plt.figure(figsize=(10,10)) plt.plot(y_data, y_pred, "x") plt.plot(y_data, y_data, "k-") plt.show() ``` #### Modelo C Si $S=male$: $$ \log(A) = \theta_0^M + \theta_1^M W_2 + 
\theta_2^M W_4 + \theta_3^M \log( L D H ) $$ Si $S=female$ $$ \log(A) = \theta_0^F + \theta_1^F W_2 + \theta_2^F W_4 + \theta_3^F \log( L D H ) $$ Si $S=indefined$ $$ \log(A) = \theta_0^I + \theta_1^I W_2 + \theta_2^I W_4 + \theta_3^I \log( L D H ) $$ ``` # 'sex','length','diameter','height','weight.whole','weight.shucked','weight.viscera','weight.shell','rings' # Entrenando el modelo C def train_model_C(data): mask_I = data[:,0] == 0 mask_M = data[:,0] == +1 mask_F = data[:,0] == -1 y = np.log(data[:,-1]) X = np.ones([data.shape[0], 4]) X[:,0] = 1.0 X[:,1] = data[:,5] X[:,2] = data[:,7] X[:,3] = np.log(data[:,1]*data[:,2]*data[:,3]) coeffs_I = np.linalg.lstsq(X[mask_I], y[mask_I])[0] coeffs_M = np.linalg.lstsq(X[mask_M], y[mask_M])[0] coeffs_F = np.linalg.lstsq(X[mask_F], y[mask_F])[0] return (coeffs_I, coeffs_M, coeffs_F) # Testeando el modelo C def test_model_C(data, coeffs): mask_I = data[:,0] == 0 mask_M = data[:,0] == +1 mask_F = data[:,0] == -1 y = np.log(data[:,-1]) X = np.ones([data.shape[0], 4]) X[:,0] = 1.0 X[:,1] = data[:,5] X[:,2] = data[:,7] X[:,3] = np.log(data[:,1]*data[:,2]*data[:,3]) # Fill up the solution ln_anillos = np.zeros(data[:,0].shape) ln_anillos[mask_I] = np.dot(X[mask_I], coeffs[0]) ln_anillos[mask_M] = np.dot(X[mask_M], coeffs[1]) ln_anillos[mask_F] = np.dot(X[mask_F], coeffs[-1]) return np.round(np.exp(ln_anillos)) # Obtener valores y prediccion coeffs_C = train_model_C(abalone_data) y_pred = test_model_C(abalone_data, coeffs_C) # Mostrar graficamente plt.figure(figsize=(10,10)) plt.plot(y_data, y_pred, "x") plt.plot(y_data, y_data, "k-") plt.show() ``` ## Desafío 3 Realice un gráfico en el cual se comparan simultáneamente el número de anillos reales vs el número de anillos estimados con los modelos A, B y C, únicamente para el caso de los abalones de sexo masculino. ``` # Realice aqui su grafico plt.figure(figsize=(16,8)) plt.plot() plt.show() ``` ## 5. 
Obteniendo el error de medición Utilice Holdout Set para obtener una estimación razonable del error predictivo de los modelos A, B y C. ``` # Implemente aquí su algoritmo para obtener el error predictivo de los métodos # # 1. Divida los datos en entrenamiento y test # 2. Entrene los modelos # 3. Obtenga la predicción de los modelos # 4. Calcule el error de los modelos ``` ## Desafíos Finales #### 1. ¿Cuál es el error predictivo que tiene el modelo A? # FIX ME #### 2. ¿Cuál es el error predictivo que tiene el modelo B? # FIX ME #### 3. ¿Cuál es el error predictivo que tiene el modelo C? # FIX ME #### 4. ¿Cuál modelo resulta mejor? ¿Cuál es su explicación? # FIX ME #### 5. ¿Existe algún modelo que sea dimensionalmente consistente? # FIX ME
github_jupyter
<a href="https://colab.research.google.com/github/fabxy/course-content-dl/blob/main/tutorials/W2D5_GenerativeModels/W2D5_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Tutorial 3: Conditional GANs and Implications of GAN Technology **Week 2, Day 5: Generative Models** **By Neuromatch Academy** __Content creators:__ Seungwook Han, Kai Xu, Akash Srivastava __Content reviewers:__ Polina Turishcheva, Melvin Selim Atay, Hadi Vafaei, Deepak Raya, Kelson Shilling-Scrivo __Content editors:__ Spiros Chavlis __Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> --- ## Tutorial Objectives The goal of this notebook is to understand conditional GANs. Then you will have the opportunity to experience first-hand how effective GANs are at modeling the data distribution and to question what the consequences of this technology may be. By the end of this tutorial you will be able to: - Understand the differences in conditional GANs. - Generate high-dimensional natural images from a BigGAN. - Understand the efficacy of GANs in modeling the data distribution (e.g., faces). - Understand the energy inefficiency / environmental impact of training these large generative models. - Understand the implications of this technology (ethics, environment, *etc*.). 
``` # @title Tutorial slides # @markdown These are the slides for the videos in this tutorial # @markdown If you want to locally download the slides, click [here](https://osf.io/ps28k/download) from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/ps28k/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) ``` --- # Setup ``` # @title Install dependencies # @markdown Install Huggingface BigGAN library !pip install pytorch-pretrained-biggan --quiet !pip install Pillow libsixel-python --quiet !pip install nltk --quiet !pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet from evaltools.airtable import AirtableForm # generate airtable form atform = AirtableForm('appn7VdPRseSoMXEG','W2D5_T3','https://portal.neuromatchacademy.org/api/redirect/to/438d4b27-5872-499a-8f72-6a79ff2a2aa6') # Imports import torch import torchvision import numpy as np import matplotlib.pyplot as plt from pytorch_pretrained_biggan import BigGAN from pytorch_pretrained_biggan import one_hot_from_names from pytorch_pretrained_biggan import truncated_noise_sample # @title Figure settings import ipywidgets as widgets # interactive display %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle") # @title Set random seed # @markdown Executing `set_seed(seed=seed)` you are setting the seed # for DL its critical to set the random seed so that students can have a # baseline to compare their results to expected results. # Read more here: https://pytorch.org/docs/stable/notes/randomness.html # Call `set_seed` function in the exercises to ensure reproducibility. 
import random import torch def set_seed(seed=None, seed_torch=True): if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True print(f'Random seed {seed} has been set.') # In case that `DataLoader` is used def seed_worker(worker_id): worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) # @title Set device (GPU or CPU). Execute `set_device()` # especially if torch modules used. # inform the user if the notebook uses GPU or CPU. def set_device(): device = "cuda" if torch.cuda.is_available() else "cpu" if device != "cuda": print("WARNING: For this notebook to perform best, " "if possible, in the menu under `Runtime` -> " "`Change runtime type.` select `GPU` ") else: print("GPU is enabled in this notebook.") return device SEED = 2021 set_seed(seed=SEED) DEVICE = set_device() # @title Download `wordnet` dataset # import nltk # nltk.download('wordnet') import requests, zipfile fname = 'wordnet.zip' url = 'https://osf.io/ekjxy/download' r = requests.get(url, allow_redirects=True) with open('wordnet.zip', 'wb') as fd: fd.write(r.content) with zipfile.ZipFile(fname, 'r') as zip_ref: zip_ref.extractall('/root/nltk_data/corpora') ``` --- # Section 1: Generating with a conditional GAN (BigGAN) ``` # @title Video 1: Conditional Generative Models from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1f54y1E79D", width=854, height=480, fs=1) print("Video available at 
https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"lV6zH2xDZck", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=lV6zH2xDZck" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add event to airtable atform.add_event('Video 1: Conditional Generative Models') display(out) ``` In this section, we will load a pre-trained conditional GAN, BigGAN, which is the state-of-the-art model in conditional high-dimensional natural image generation, and generate samples from it. Since it is a class conditional model, we will be able to use the class label to generate images from the different classes of objects. Read here for more details on BigGAN: https://arxiv.org/pdf/1809.11096.pdf ``` # Load respective BigGAN model for the specified resolution (biggan-deep-128, biggan-deep-256, biggan-deep-512) def load_biggan(model_res): return BigGAN.from_pretrained('biggan-deep-{}'.format(model_res)) # Create class and noise vectors for sampling from BigGAN def create_class_noise_vectors(class_str, trunc, num_samples): class_vector = one_hot_from_names([class_str]*num_samples, batch_size=num_samples) noise_vector = truncated_noise_sample(truncation=trunc, batch_size=num_samples) return class_vector, noise_vector # Generate samples from BigGAN def generate_biggan_samples(model, class_vector, noise_vector, device, truncation=0.4): # Convert to tensor noise_vector = torch.from_numpy(noise_vector) class_vector = torch.from_numpy(class_vector) # Move to GPU noise_vector = noise_vector.to(device) class_vector = class_vector.to(device) model.to(device) # Generate an image with torch.no_grad(): output = model(noise_vector, class_vector, truncation) # Back to CPU output = output.to('cpu') # The output layer of BigGAN has a tanh layer, resulting the range of [-1, 1] for the 
output image # Therefore, we normalize the images properly to [0, 1] range. # Clipping is only in case of numerical instability problems output = torch.clip(((output.detach().clone() + 1) / 2.0), 0, 1) output = output # Make grid and show generated samples output_grid = torchvision.utils.make_grid(output, nrow=min(4, output.shape[0]), padding=5) plt.imshow(output_grid.permute(1, 2, 0)) return output_grid def generate(b): # Create BigGAN model model = load_biggan(MODEL_RESOLUTION) # Use specified parameters (resolution, class, number of samples, etc) to generate from BigGAN class_vector, noise_vector = create_class_noise_vectors(CLASS, TRUNCATION, NUM_SAMPLES) samples_grid = generate_biggan_samples(model, class_vector, noise_vector, DEVICE, TRUNCATION) torchvision.utils.save_image(samples_grid, 'samples.png') ### If CUDA out of memory issue, lower NUM_SAMPLES (number of samples) ``` ## Section 1.1: Define configurations We will now define the configurations (resolution of model, number of samples, class to sample from, truncation level) under which we will sample from BigGAN. ***Question***: What is the truncation trick employed by BigGAN? How does sample variety and fidelity change by varying the truncation level? 
(Hint: play with the truncation slider and try sampling at different levels) ``` # @title { run: "auto" } ### RUN THIS BLOCK EVERY TIME YOU CHANGE THE PARAMETERS FOR GENERATION # Resolution at which to generate MODEL_RESOLUTION = "128" # @param [128, 256, 512] # Number of images to generate NUM_SAMPLES = 4 # @param {type:"slider", min:4, max:12, step:4} # Class of images to generate CLASS = 'German shepherd' # @param ['tench', 'magpie', 'jellyfish', 'German shepherd', 'bee', 'acoustic guitar', 'coffee mug', 'minibus', 'monitor'] # Truncation level of the normal distribution we sample z from TRUNCATION = 0.8 # @param {type:"slider", min:0.1, max:1, step:0.1} # Create generate button, given parameters specified above button = widgets.Button(description="GENERATE!", layout=widgets.Layout(width='30%', height='80px'), button_style='danger') output = widgets.Output() display(button, output) button.on_click(generate) ``` ## Think! 1: BigGANs 1. How does BigGAN differ from previous state-of-the-art generative models for high-dimensional natural images? In other words, how does BigGAN solve high-dimensional image generation? (Hint: look into model architecture and training configurations) (BigGAN paper: https://arxiv.org/pdf/1809.11096.pdf) 2. Continuing from Question 1, what are the drawbacks of introducing such techniques into training large models for high-dimensional, diverse datasets? 3. 
Play with other pre-trained generative models like StyleGAN here -- where code for sampling and interpolation in the latent space is available: https://github.com/NVlabs/stylegan ``` # @title Student Response from ipywidgets import widgets text=widgets.Textarea( value='Type answer here and Push submit', placeholder='Type something', description='', disabled=False ) button = widgets.Button(description="Submit!") display(text,button) def on_button_clicked(b): atform.add_answer('q1' , text.value) print("Submission successful!") button.on_click(on_button_clicked) # to_remove explanation """ 1 - Very large mini-batch sizes (>= 2048) -- along with synchronous batch normalization across all GPUs / mini-batches to address the high diversity of the ImageNet dataset - Increased number of parameters (at least double of previous state-of-the-art models like SAGAN) (from increased widths of the layers) - Spectral normalization, orthogonal regularization penalty, and dropouts in final layer of D to stabilize the highly unstable training of BigGAN. Even with these stabilization techniques, BigGAN still suffers from chronic instability (as described in the paper) and requires multiple restarts. The model almost always collapses at some point in the training, but the key is to stop the training before it does. 2. Huge compute requirement to allow for large mini-batch sizes and therefore high energy consumption 3. 
N/A """; ``` --- # Section 2: Ethical issues ``` # @title Video 2: Ethical Issues from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV11L411H7pr", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"ZtWFeUZgfVk", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add event to airtable atform.add_event('Video 2: Ethical Issues') display(out) ``` ## Section 2.1: Faces Quiz Now is your turn to test your abilities on recognizing a real vs. a fake image! ``` # @markdown Real or Fake? from IPython.display import IFrame IFrame(src='https://docs.google.com/forms/d/e/1FAIpQLSeGjn2S2bn6Q1qWjVgDS5LG7G1GsQQh2Q0T9dEUO1z5_W0yYg/viewform?usp=sf_link', width=900, height=600) ``` ## Section 2.2: Energy Efficiency Quiz ``` # @markdown Make a guess IFrame(src='https://docs.google.com/forms/d/e/1FAIpQLSe8suNt4ZmadSr_6IWq6s_nUYxC1VCpjR2cBBmQ7cR_5znCZw/viewform?usp=sf_link', width=900, height=600) ``` --- # Summary Hooray! You have finished the second week of NMA-DL course!!! 
In the first section of this tutorial, we have learned: - How conditional GANs differ from unconditional models - How to use a pre-trained BigGAN model to generate high-dimensional photo-realistic images and its tricks to modulate diversity and image fidelity In the second section, we learned about the broader ethical implications of GAN technology on society through deepfakes and their tremendous energy inefficiency. On the brighter side, as we learned throughout the week, GANs are very effective in modeling the data distribution and have many practical applications. For example, as personalized healthcare and applications of AI in healthcare rise, the need to remove any Personally Identifiable Information (PII) becomes more important. As shown in [Piacentino and Angulo, 2020](https://doi.org/10.1007/978-3-030-45385-5_36), GANs can be leveraged to anonymize healthcare data. As a food for thought, what are some other practical applications of GANs that you can think of? Discuss with your pod your ideas. 
``` # @title Video 3: Recap and advanced topics from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Uo4y1D7Nj", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"7nUjFG3N04I", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add event to airtable atform.add_event('Video 3: Recap and advanced topics') display(out) # @title Airtable Submission Link from IPython import display as IPydisplay IPydisplay.HTML( f""" <div> <a href= "{atform.url()}" target="_blank"> <img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1" alt="button link end of day Survey" style="width:410px"></a> </div>""" ) ```
github_jupyter
# High-level PyTorch Example ``` import os import sys import numpy as np import math import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data_utils import torch.nn.init as init from torch.autograd import Variable from common.params import * from common.utils import * # Big impact on training-time (from 350 to 165s) torch.backends.cudnn.benchmark=True # enables cudnn's auto-tuner print("OS: ", sys.platform) print("Python: ", sys.version) print("PyTorch: ", torch.__version__) print("Numpy: ", np.__version__) print("GPU: ", get_gpu_name()) class SymbolModule(nn.Module): def __init__(self): super(SymbolModule, self).__init__() self.conv1 = nn.Conv2d(3, 50, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(50, 50, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(50, 100, kernel_size=3, padding=1) self.conv4 = nn.Conv2d(100, 100, kernel_size=3, padding=1) # feature map size is 8*8 by pooling self.fc1 = nn.Linear(100*8*8, 512) self.fc2 = nn.Linear(512, N_CLASSES) def forward(self, x): """ PyTorch requires a flag for training in dropout """ x = self.conv2(F.relu(self.conv1(x))) x = F.relu(F.max_pool2d(x, kernel_size=2, stride=2)) x = F.dropout(x, 0.25, training=self.training) x = self.conv4(F.relu(self.conv3(x))) x = F.relu(F.max_pool2d(x, kernel_size=2, stride=2)) x = F.dropout(x, 0.25, training=self.training) x = x.view(-1, 100*8*8) # reshape Variable x = F.dropout(F.relu(self.fc1(x)), 0.5, training=self.training) # nn.CrossEntropyLoss() contains softmax, don't apply twice #return F.log_softmax(x) return self.fc2(x) def init_model(m): # Implementation of momentum: # v = \rho * v + g \\ # p = p - lr * v opt = optim.SGD(m.parameters(), lr=LR, momentum=MOMENTUM) criterion = nn.CrossEntropyLoss() return opt, criterion %%time # Data into format for library x_train, x_test, y_train, y_test = cifar_for_library(channel_first=True) # Torch-specific y_train = y_train.astype(np.int64) y_test = 
y_test.astype(np.int64) print(x_train.shape, x_test.shape, y_train.shape, y_test.shape) print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype) %%time sym = SymbolModule() sym.cuda() # CUDA! %%time optimizer, criterion = init_model(sym) %%time # 169s # Sets training = True sym.train() for j in range(EPOCHS): for data, target in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True): # Get samples data = Variable(torch.FloatTensor(data).cuda()) target = Variable(torch.LongTensor(target).cuda()) # Init optimizer.zero_grad() # Forwards output = sym(data) # Loss loss = criterion(output, target) # Back-prop loss.backward() optimizer.step() # Log print(j) %%time # Test model # Sets training = False sym.eval() n_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE y_guess = np.zeros(n_samples, dtype=np.int) y_truth = y_test[:n_samples] c = 0 for data, target in yield_mb(x_test, y_test, BATCHSIZE): # Get samples data = Variable(torch.FloatTensor(data).cuda()) # Forwards output = sym(data) pred = output.data.max(1)[1].cpu().numpy().squeeze() # Collect results y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = pred c += 1 print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess)) ```
github_jupyter
``` import dash import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go import plotly.figure_factory as FF from datetime import datetime import glob import os.path import pymysql import sqlconfig # From sqlconfig.py import pandas as pd import sqlalchemy import psycopg2 from tqdm import tqdm print("Import Complete") passwd = sqlconfig.passwd # From sqlconfig.py user = sqlconfig.user # From sqlconfig.py DB = 'NewLab' #name of databases to activate engine = sqlalchemy.create_engine('postgresql+psycopg2://'+user+':'+passwd+'@35.221.58.17/'+DB) base_path = os.path.dirname(os.path.dirname(os.path.abspath(os. getcwd()))) start_path = os.path.join(base_path,"Plotly_dash","CSV", "2interim","2_cbas_post_SD_resample","resampled(5T)") start_path fn = 'protoCBAS-*' path = sorted(glob.glob(os.path.join(start_path, fn))) path dfs = [pd.read_csv(f, parse_dates=["timestamp"], index_col=["timestamp"]).assign(sensor=f) for f in path] dfs[0].sensor ## filtering directory/file extensions stripboard = ((len(start_path))) # getting the length of the path up to where glob fills in filenames for d in dfs: d.sensor = d.sensor.str.slice(start=stripboard+1).str.replace(".csv", "") availablecolumns = pd.Series(dfs[0].columns) ``` ### SQL setup create engine for CBAS db ``` [d.columns = map(str.lower, d.columns) for d in dfs] # Lowercase column names dfs[0].columns ``` Write to DB ``` dfs[2].to_sql('cbas',engine,if_exists='append',index_label='timestamp') print("FINITO") dfs[3].to_sql('cbas',engine,if_exists='append',index_label='timestamp') print("FINITO") dfs[4].to_sql('cbas',engine,if_exists='append',index_label='timestamp') print("FINITO") print(engine.table_names()) ``` Read from DB ``` query= ''' SELECT * FROM cbas ''' CBAStest = pd.read_sql(query,engine,parse_dates=["timestamp"], index_col=["timestamp"]) CBAStest ``` ## Import comfort metrics ``` passwd = sqlconfig.passwd # From sqlconfig.py user = 'sm' # From sqlconfig.py DB = 'NewLab' #name 
of databases to activate sqlconfig.user engine = sqlalchemy.create_engine('postgresql+psycopg2://'+user+':'+passwd+'@35.221.58.17/'+DB) base_path = os.path.dirname(os.path.dirname(os.path.abspath(os. getcwd()))) start_path = os.path.join(base_path,"Plotly_dash","CSV", "1Processed","NewLab_run") start_path fn = 'protoCBAS-*' path = sorted(glob.glob(os.path.join(start_path, fn))) path dfs = [pd.read_csv(f, parse_dates=["timestamp"], index_col=["timestamp"]).assign(sensor=f) for f in path] ## filtering directory/file extensions stripboard = ((len(start_path))) # getting the length of the path up to where glob fills in filenames for d in dfs: d.sensor = d.sensor.str.slice(start=stripboard+1).str.replace(".csv", "") dfs[0].sensor availablecolumns = pd.Series(dfs[0].columns) availablecolumns dfs[0].head(5) ``` In sql client: ```sql CREATE TABLE cbasnl(); ``` ``` #for x in dfs: # x = x.rename({"Wkdy(EST)": "Wkdy", "Hour(EST)":"Hour", "Month(EST)":"Month", "TOD(EST)":"TOD", "DOY(EST)":"DOY"}, axis='columns') dfs[4] = dfs[4].rename({"Wkdy(EST)": "Wkdy", "Hour(EST)":"Hour", "Month(EST)":"Month", "TOD(EST)":"TOD", "DOY(EST)":"DOY"}, axis='columns') dfs[4]["Wkdy"] [d.to_sql('cbasnl',engine,if_exists='append',index_label='timestamp') for d in tqdm(dfs,desc="Uploading to DB...")] ```
github_jupyter
``` cd /content/drive/My Drive/Spam Classifier import nltk import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from nltk.corpus import stopwords from nltk.stem import PorterStemmer,WordNetLemmatizer from nltk.tokenize import word_tokenize import sklearn.metrics as m from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier nltk.download('punkt') nltk.download('stopwords') nltk.download('wordnet') dataset=pd.read_csv('spam.csv',encoding='latin-1') dataset sent=dataset.iloc[:,[1]]['v2'] sent label=dataset.iloc[:,[0]]['v1'] label from sklearn.preprocessing import LabelEncoder le=LabelEncoder() label=le.fit_transform(label) label le.classes_ import re len(set(stopwords.words('english'))) stem=PorterStemmer() sent sentences=[] for sen in sent: senti=re.sub('[^A-Za-z]',' ',sen) senti=senti.lower() words=word_tokenize(senti) word=[stem.stem(i) for i in words if i not in stopwords.words('english')] senti=' '.join(word) sentences.append(senti) sentences from sklearn.feature_extraction.text import TfidfVectorizer tfidf=TfidfVectorizer(max_features=5000) features=tfidf.fit_transform(sentences) features=features.toarray() features len(tfidf.get_feature_names()) tfidf.get_feature_names() feature_train,feature_test,label_train,label_test=train_test_split(features,label,test_size=0.2,random_state=7) ``` #Naive Bayies ``` model=MultinomialNB() model.fit(feature_train,label_train) label_pred=model.predict(feature_test) label_pred label_test m.accuracy_score(label_test,label_pred) print(m.classification_report(label_test,label_pred)) print(m.confusion_matrix(label_test,label_pred)) ``` #SVC ``` model=SVC(kernel='linear') model.fit(feature_train,label_train) label_pred=model.predict(feature_test) m.accuracy_score(label_test,label_pred) label_pred label_test 
print(m.classification_report(label_test,label_pred)) print(m.confusion_matrix(label_test,label_pred)) ``` #LogisticRegression ``` model=LogisticRegression() model.fit(feature_train,label_train) label_pred=model.predict(feature_test) m.accuracy_score(label_test,label_pred) label_pred label_test print(m.classification_report(label_test,label_pred)) print(m.confusion_matrix(label_test,label_pred)) ``` #Decision Tree ``` model=DecisionTreeClassifier() model.fit(feature_train,label_train) label_pred=model.predict(feature_test) m.accuracy_score(label_test,label_pred) label_pred label_test print(m.classification_report(label_test,label_pred)) print(m.confusion_matrix(label_test,label_pred)) ```
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex client library for Python: Custom training tabular regression model for batch prediction with explanation <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/explainable_ai/gapic-custom_tabular_regression_batch_explain.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/explainable_ai/gapic-custom_tabular_regression_batch_explain.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom tabular regression model for batch prediction with explanation. ### Dataset The dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD. 
### Objective In this tutorial, you create a custom trained model, with a training pipeline, from a Python script in a Google prebuilt Docker container using the Vertex client library for Python, and then do a batch prediction with explanations on the uploaded model. Alternatively, you can create custom trained models using `gcloud` command-line tool or online using Cloud Console. The steps performed include: - Create a Vertex custom job for training a model. - Train the TensorFlow model. - Retrieve and load the model artifacts. - View the model evaluation. - Set explanation parameters. - Upload the model as a Vertex `Model` resource. - Make a batch prediction with explanations. ### Costs This tutorial uses billable components of Google Cloud (GCP): * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ## Installation Install the latest version of Vertex client library for Python. ``` import sys import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = '--user' else: USER_FLAG = '' ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! pip3 install -U google-cloud-storage $USER_FLAG ``` ### Install other packages Install other packages required for this tutorial. ``` ! pip3 install -U tabulate $USER_FLAG ``` ### Restart the kernel Once you've installed the Vertex client library for Python and *google-cloud-storage* library, you need to restart the notebook kernel so it can find the packages. 
``` if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. [The Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`. ``` PROJECT_ID = "[your-project-id]" #@param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. 
- Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = 'us-central1' #@param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ``` # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your Google Cloud account. 
This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your Google Cloud # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** When you submit a custom training job using the Vertex client library for Python, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex runs the code from this package. In this tutorial, Vertex also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. ``` BUCKET_NAME = "gs://[your-bucket-name]" #@param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION $BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. 
### Import libraries and define constants #### Import Vertex client library for Python Import the Vertex client library for Python into your Python environment. ``` import time import google.cloud.aiplatform_v1beta1 as aip from google.protobuf import json_format from google.protobuf.struct_pb2 import Value from google.protobuf.struct_pb2 import Struct from google.protobuf.json_format import MessageToJson from google.protobuf.json_format import ParseDict ``` #### Vertex AI constants Setup up the following constants for Vertex AI: - `API_ENDPOINT`: The Vertex AI service endpoint for dataset, model, job, pipeline and endpoint services. - `PARENT`: The Vertex AI location root path for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` resources. ``` # API service endpoint API_ENDPOINT = "{0}-aiplatform.googleapis.com".format(REGION) # Vertex AI location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### Set hardware accelerators You can set hardware accelerators for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) Otherwise specify `(None, None)` to use a container image to run on a CPU. Learn [which accelerators are available in your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators). *Note*: TF releases before 2.3 for GPU support will fail to load the custom trained model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom trained models, use a container image for TF 2.3 with GPU support. 
``` if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU"))) else: TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1) if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU"))) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) ``` #### Set pre-built containers Set the pre-built Docker container image for training and prediction. For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers). For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers) ``` if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = '2-1' if TF[0] == '2': if TRAIN_GPU: TRAIN_VERSION = 'tf-gpu.{}'.format(TF) else: TRAIN_VERSION = 'tf-cpu.{}'.format(TF) if DEPLOY_GPU: DEPLOY_VERSION = 'tf2-gpu.{}'.format(TF) else: DEPLOY_VERSION = 'tf2-cpu.{}'.format(TF) else: if TRAIN_GPU: TRAIN_VERSION = 'tf-gpu.{}'.format(TF) else: TRAIN_VERSION = 'tf-cpu.{}'.format(TF) if DEPLOY_GPU: DEPLOY_VERSION = 'tf-gpu.{}'.format(TF) else: DEPLOY_VERSION = 'tf-cpu.{}'.format(TF) TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) ``` #### Set machine type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. 
- `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. Learn [which machine types are available for training](https://cloud.google.com/vertex-ai/docs/training/configure-compute) and [for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute) ``` if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = 'n1-standard' VCPU = '4' TRAIN_COMPUTE = MACHINE_TYPE + '-' + VCPU print('Train machine type', TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = 'n1-standard' VCPU = '4' DEPLOY_COMPUTE = MACHINE_TYPE + '-' + VCPU print('Deploy machine type', DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own custom model and training for Boston Housing. ## Set up clients The Vertex client library for Python works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Model Service for `Model` resources. - Endpoint Service for deployment. - Job Service for batch jobs and custom training. - Prediction Service for serving. 
``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_job_client(): client = aip.JobServiceClient( client_options=client_options ) return client def create_model_client(): client = aip.ModelServiceClient( client_options=client_options ) return client def create_endpoint_client(): client = aip.EndpointServiceClient( client_options=client_options ) return client def create_prediction_client(): client = aip.PredictionServiceClient( client_options=client_options ) return client clients = {} clients['job'] = create_job_client() clients['model'] = create_model_client() clients['endpoint'] = create_endpoint_client() clients['prediction'] = create_prediction_client() for client in clients.items(): print(client) ``` ## Train a model There are two ways you can train a custom model using a container image: - **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model. - **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. ## Prepare your custom job specification Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following: - `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed) - `python_package_spec` : The specification of the Python package to be installed with the pre-built container. ### Prepare your machine specification Now define the machine specification for your custom training job. This tells Vertex AI what type of machine instance to provision for the training. - `machine_type`: The type of Google Cloud instance to provision -- e.g., n1-standard-8. 
- `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators. ``` if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU } else: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_count": 0 } ``` ### Prepare your disk specification (optional) Now define the disk specification for your custom training job. This tells Vertex AI what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB. ``` DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = { "boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE } ``` ### Define the worker pool specification Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following: - `replica_count`: The number of instances to provision of this machine type. - `machine_spec`: The hardware specification. - `disk_spec` : (optional) The disk storage specification. - `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module. Let's dive deeper now into the python package specification: -`executor_image_spec`: This is the docker image which is configured for your custom training job. -`package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. 
In the latter case, the job service will unzip (unarchive) the contents into the docker image. -`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task.py` -- note that it was not necessary to append the `.py` suffix. -`args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances. 
``` JOB_NAME = "custom_job_" + TIMESTAMP MODEL_DIR = '{}/{}'.format(BUCKET_NAME, JOB_NAME) if not TRAIN_NGPU or TRAIN_NGPU < 2: TRAIN_STRATEGY = "single" else: TRAIN_STRATEGY = "mirror" EPOCHS = 20 STEPS = 100 DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "python_package_spec": { "executor_image_uri": TRAIN_IMAGE, "package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"], "python_module": "trainer.task", "args": CMDARGS, } } ] ``` ### Assemble a job specification Now assemble the complete description for the custom job specification: - `display_name`: The human readable name you assign to this custom job. - `job_spec`: The specification for the custom job. - `worker_pool_specs`: The specification for the machine VM instances. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form: <output_uri_prefix>/model ``` if DIRECT: job_spec = { "worker_pool_specs": worker_pool_spec } else: job_spec = { "worker_pool_specs": worker_pool_spec, "base_output_directory": {"output_uri_prefix": MODEL_DIR} } custom_job = { "display_name": JOB_NAME, "job_spec": job_spec } ``` ### Examine the training package #### Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. 
- PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image. The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). #### Package Assembly In the following cells, you will assemble the training package. ``` # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py ``` #### Task.py contents In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary: - Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`. - Loads Boston Housing dataset from TF.Keras builtin datasets - Builds a simple deep neural network model using TF.Keras model API. - Compiles the model (`compile()`). - Sets a training distribution strategy according to the argument `args.distribute`. 
- Trains the model (`fit()`) with epochs specified by `args.epochs`.
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
- Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file.

```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing

import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()

parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str,
                    help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.001, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int,
                    help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=100, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
                    default='/tmp/param.txt', type=str,
                    help='Output file for parameters')
args = parser.parse_args()

print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))

# Single Machine, single compute device
if args.distribute == 'single':
    # tf.test.is_gpu_available() is deprecated; list_physical_devices is the
    # supported way to detect whether a GPU is present.
    if tf.config.list_physical_devices('GPU'):
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))


def make_dataset():
    """Load and normalize the Boston Housing dataset.

    Each of the 13 feature columns is rescaled to [0, 1] by dividing by its
    maximum value. The per-feature maxima are written to args.param_file so
    the same scaling can be reproduced at serving time.

    Returns:
        ((x_train, y_train), (x_test, y_test)) with scaled feature arrays.
    """

    # Scaling Boston Housing data features
    def scale(feature):
        # NOTE: np.float (a deprecated alias of the builtin float) was removed
        # in NumPy 1.24; plain float keeps the same float64 dtype.
        feature_max = np.max(feature)
        feature = (feature / feature_max).astype(float)
        return feature, feature_max

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
        path="boston_housing.npz", test_split=0.2, seed=113
    )
    params = []
    for m in range(13):
        # Scale each feature COLUMN (x[:, m]), not row m: the arrays are
        # (num_samples, 13), so indexing x_train[m] would rescale the first
        # 13 *samples* instead of the 13 features.
        x_train[:, m], feature_max = scale(x_train[:, m])
        x_test[:, m], _ = scale(x_test[:, m])
        params.append(feature_max)

    # store the normalization (max) value for each feature
    with tf.io.gfile.GFile(args.param_file, 'w') as f:
        f.write(str(params))
    return (x_train, y_train), (x_test, y_test)


# Build the Keras model
def build_and_compile_dnn_model():
    """Build and compile a small regression DNN for the 13 housing features."""
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(1, activation='linear')
    ])
    model.compile(
        loss='mse',
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
    return model


NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS

with strategy.scope():
    # Creation of dataset, and model building/compiling need to be within
    # `strategy.scope()`.
    model = build_and_compile_dnn_model()

# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
```

#### Store training script on your Cloud Storage bucket

Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.

```
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
```

### Train the model

Now start the training of your custom training job on Vertex AI.
Use this helper function `create_custom_job`, which takes the following parameter: -`custom_job`: The specification for the custom job. The helper function calls job client service's `create_custom_job` method, with the following parameters: -`parent`: The Vertex AI location path to `Dataset`, `Model` and `Endpoint` resources. -`custom_job`: The specification for the custom job. You will display a handful of the fields returned in `response` object, with the two that are of most interest are: -`response.name`: The Vertex AI fully qualified identifier assigned to this custom training job. You save this identifier for using in subsequent steps. -`response.state`: The current state of the custom training job. ``` def create_custom_job(custom_job): response = clients['job'].create_custom_job(parent=PARENT, custom_job=custom_job) print("name:", response.name) print("display_name:", response.display_name) print("state:", response.state) print("create_time:", response.create_time) print("update_time:", response.update_time) return response response = create_custom_job(custom_job) ``` Now get the unique identifier for the custom job you created. ``` # The full unique ID for the custom job job_id = response.name # The short numeric ID for the custom job job_short_id = job_id.split('/')[-1] print(job_id) ``` ### Get information on a custom job Next, use this helper function `get_custom_job`, which takes the following parameter: - `name`: The Vertex AI fully qualified identifier for the custom job. The helper function calls the job client service's `get_custom_job` method, with the following parameter: - `name`: The Vertex AI fully qualified identifier for the custom job. If you recall, you got the Vertex AI fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`. 
``` def get_custom_job(name, silent=False): response = clients['job'].get_custom_job(name=name) if silent: return response print("name:", response.name) print("display_name:", response.display_name) print("state:", response.state) print("create_time:", response.create_time) print("update_time:", response.update_time) return response response = get_custom_job(job_id) ``` # Deploy the model Training the above model may take upwards of 20 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your local Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`. ``` while True: response = get_custom_job(job_id, True) if response.state != aip.JobState.JOB_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_path_to_deploy = None if response.state == aip.JobState.JOB_STATE_FAILED: break else: if not DIRECT: MODEL_DIR = MODEL_DIR + "/model" model_path_to_deploy = MODEL_DIR print("Training Job Time:", response.end_time - response.start_time) print("Training Elapsed Time:", response.update_time - response.create_time) break time.sleep(60) print("model_to_deploy:", model_path_to_deploy) ``` ## Load the saved model Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction. To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`. ``` import tensorflow as tf model = tf.keras.models.load_model(MODEL_DIR) ``` ## Evaluate the model Now let's find out how good the model is. ### Load evaluation data You will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. 
This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home). You don't need the training data, and hence why we loaded it as `(_, _)`. Before you can run the data through evaluation, you need to preprocess it: `x_test`: 1. Normalize (rescale) the data in each column by dividing each value by the maximum value of that column. This replaces each single value with a 32-bit floating point number between 0 and 1. ``` from tensorflow.keras.datasets import boston_housing import numpy as np (_, _), (x_test, y_test) = boston_housing.load_data( path="boston_housing.npz", test_split=0.2, seed=113) def scale(feature): max = np.max(feature) feature = (feature / max).astype(np.float32) return feature # Let's save one data item that has not been scaled x_test_notscaled = x_test[0:1].copy() for _ in range(13): x_test[_] = scale(x_test[_]) x_test = x_test.astype(np.float32) print(x_test.shape, x_test.dtype, y_test.shape) print("scaled", x_test[0]) print("unscaled", x_test_notscaled) ``` ### Perform the model evaluation Now evaluate how well the model in the custom job did. ``` model.evaluate(x_test, y_test) ``` ## Upload the model for serving Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. ### How does the serving function work When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. 
The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.

The serving function consists of two parts:

- `preprocessing function`:
  - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
  - Performs the same preprocessing of the data that was done during training the underlying model, including normalizing and scaling.
- `post-processing function`:
  - Converts the model output to the format expected by the receiving application -- e.g., compresses the output.
  - Packages the output for the receiving application, including adding headings, and making JSON objects.

Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.

One thing you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.

## Get the serving function signature

You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.

When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently. ``` loaded = tf.saved_model.load(model_path_to_deploy) serving_input = list(loaded.signatures['serving_default'].structured_input_signature[1].keys())[0] print('Serving function input:', serving_input) serving_output = list(loaded.signatures['serving_default'].structured_outputs.keys())[0] print('Serving function output:', serving_output) input_name = model.input.name print('Model input name:', input_name) output_name = model.output.name print('Model output name:', output_name) ``` ### Explanation Specification To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to a Vertex `Model` resource. These settings are referred to as the explanation metadata, which consists of: - `parameters`: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between: - Shapley - *Note*, not recommended for image data -- can be very long running - XRAI - Integrated Gradients - `metadata`: This is the specification for how the algoithm is applied on your custom model. #### Explanation Parameters Let's first dive deeper into the settings for the explainability algorithm. #### Shapley Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values. Use Cases: - Classification and regression on tabular data. Parameters: - `path_count`: This is the number of paths over the features that will be processed by the algorithm. An exact approximation of the Shapley values requires M! paths, where M is the number of features. For the CIFAR10 dataset, this would be 784 (28*28). For any non-trival number of features, this is too compute expensive. 
You can reduce the number of paths over the features to M * `path_count`. #### Integrated Gradients A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value. Use Cases: - Classification and regression on tabular data. - Classification on image data. Parameters: - `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time. #### XRAI Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels. Use Cases: - Classification on image data. Parameters: - `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time. In the next code cell, set the variable `XAI` to which explainabilty algorithm you will use on your custom model. ``` XAI = "ig" # [ shapley, ig, xrai ] if XAI == "shapley": PARAMETERS = { "sampled_shapley_attribution": {'path_count': 10} } elif XAI == "ig": PARAMETERS = { "integrated_gradients_attribution": {"step_count": 50} } elif XAI == "xrai": PARAMETERS = { "xrai_attribution": {"step_count": 50} } parameters = aip.ExplanationParameters(PARAMETERS) ``` #### Explanation Metadata Let's first dive deeper into the explanation metadata, which consists of: - `outputs`: A scalar value in the output to attribute -- what to explain. For example, in a probability output \[0.1, 0.2, 0.7\] for classification, one wants an explanation for 0.7. Consider the following formulae, where the output is `y` and that is what we want to explain. y = f(x) Consider the following formulae, where the outputs are `y` and `z`. 
Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output `y` or `z`. Assume in this example the model is object detection and y and z are the bounding box and the object classification. You would want to pick which of the two outputs to explain. y, z = f(x) The dictionary format for `outputs` is: { "outputs": { "[your_display_name]": "output_tensor_name": [layer] } } <div style="margin-left: 25px;"> <ul> <li>[your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".</li> <li>"output_tensor_name": The key/value field to identify the output layer to explain. </li> <li>[layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model</li>. </ul> </div> - `inputs`: The features for attribution -- how they contributed to the output. Consider the following formulae, where `a` and `b` are the features. We have to pick which features to explain how the contributed. Assume that this model is deployed for A/B testing, where `a` are the data_items for the prediction and `b` identifies whether the model instance is A or B. You would want to pick `a` (or some subset of) for the features, and not `b` since it does not contribute to the prediction. y = f(a,b) The minimum dictionary format for `inputs` is: { "inputs": { "[your_display_name]": "input_tensor_name": [layer] } } <div style="margin-left: 25px;"> <ul> <li>[your_display_name]: A human readable name you assign to the input to explain. A common example is "features".</li> <li>"input_tensor_name": The key/value field to identify the input layer for the feature attribution. </li> <li>[layer]: The input layer for feature attribution. 
In a single input tensor model, it is the first (bottom-most) layer in the model.</li> </ul> </div> Since the inputs to the model are tabular, you can specify the following two additional fields as reporting/visualization aids: <div style="margin-left: 25px;"> <ul> <li>"encoding": "BAG_OF_FEATURES" : Indicates that the inputs are set of tabular features.</li> <li>"index_feature_mapping": [ feature-names ] : A list of human readable names for each feature. For this example, we use the feature names specified in the dataset.</li> <li>"modality": "numeric": Indicates the field values are numeric.</li> </ul> </div> ``` INPUT_METADATA = {'input_tensor_name': serving_input, "encoding": "BAG_OF_FEATURES", "modality": "numeric", "index_feature_mapping": ["crim", "zn", "indus", "chas", "nox", "rm", "age", "dis", "rad", "tax", "ptratio", "b", "lstat"] } OUTPUT_METADATA = {'output_tensor_name': serving_output} input_metadata = aip.ExplanationMetadata.InputMetadata(INPUT_METADATA) output_metadata = aip.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA) metadata = aip.ExplanationMetadata( inputs = {'features': input_metadata}, outputs = {'medv' : output_metadata} ) explanation_spec = aip.ExplanationSpec( metadata=metadata, parameters=parameters ) ``` ### Upload the model Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions. The helper function takes the following parameters: - `display_name`: A human readable name for the `Endpoint` service. - `image_uri`: The container image for the model deployment. - `model_uri`: The Cloud Storage path to our SavedModel artificat. 
For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`. The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters: - `parent`: The Vertex AI location root path for `Dataset`, `Model` and `Endpoint` resources. - `model`: The specification for the Vertex `Model` resource instance. Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields: - `display_name`: A human readable name for the `Model` resource. - `metadata_schema_uri`: Since your model was built without an Vertex `Dataset` resource, you will leave this blank (`''`). - `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format. - `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `explanation_spec`: This is the specification for enabling explainability for your model. Uploading a model into a Vertex `Model` resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready. The helper function returns the Vertex AI fully qualified identifier for the corresponding Vertex `Model` instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id. 
``` IMAGE_URI = DEPLOY_IMAGE def upload_model(display_name, image_uri, model_uri): model = aip.Model(display_name=display_name, artifact_uri=model_uri, metadata_schema_uri="", explanation_spec=explanation_spec, container_spec={ "image_uri": image_uri }) response = clients['model'].upload_model(parent=PARENT, model=model) print("Long running operation:", response.operation.name) upload_model_response = response.result(timeout=180) print("upload_model_response") print(" model:", upload_model_response.model) return upload_model_response.model model_to_deploy_id = upload_model("boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy) ``` ### Get `Model` resource information Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter: - `name`: The Vertex AI unique identifier for the `Model` resource. This helper function calls the Vertex AI `Model` client service's method `get_model`, with the following parameter: - `name`: The Vertex AI unique identifier for the `Model` resource. ``` def get_model(name): response = clients['model'].get_model(name=name) print(response) get_model(model_to_deploy_id) ``` ## Model deployment for batch prediction Now deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for online prediction. For online prediction, you: 1. Create an `Endpoint` resource for deploying the `Model` resource to. 2. Deploy the `Model` resource to the `Endpoint` resource. 3. Make online prediction requests to the `Endpoint` resource. For batch-prediction, you: 1. Create a batch prediction job. 2. The job service will provision resources for the batch prediction request. 3. The results of the batch prediction request are returned to the caller. 4. The job service will unprovision the resoures for the batch prediction request. ## Send a batch prediction request Send a batch prediction to your deployed model. 
``` test_item_1 = x_test[0] test_label_1 = y_test[0] test_item_2 = x_test[1] test_label_2 = y_test[1] print(test_item_1.shape) ``` ### Make the batch input file Now make a batch input file, which you will store in your local Cloud Storage bucket. Each instance in the prediction request is a dictionary entry of the form: {serving_input: content} - `serving_input`: the name of the input layer of the underlying model. - `content`: The feature values of the test item as a list. ``` import json gcs_input_uri = BUCKET_NAME + "/" + "test.jsonl" with tf.io.gfile.GFile(gcs_input_uri, 'w') as f: data = {serving_input: test_item_1.tolist()} f.write(json.dumps(data) + '\n') data = {serving_input: test_item_2.tolist()} f.write(json.dumps(data) + '\n') ``` ### Compute instance scaling You have several choices on scaling the compute instances for handling your batch prediction requests: - Single Instance: The batch prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them. - Auto Scaling: The batch prediction requests are split across a scaleable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions. 
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request. ``` MIN_NODES = 1 MAX_NODES = 1 ``` ### Make batch prediction request Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters: - `display_name`: The human readable name for the prediction job. - `model_name`: The Vertex AI fully qualified identifier for the `Model` resource. - `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above. - `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to. - `parameters`: Additional filtering parameters for serving prediction results. The helper function calls the job client service's `create_batch_prediction_job` metho, with the following parameters: - `parent`: The Vertex AI location root path for Dataset, Model and Pipeline resources. - `batch_prediction_job`: The specification for the batch prediction job. Let's now dive into the specification for the `batch_prediction_job`: - `display_name`: The human readable name for the prediction batch job. - `model`: The Vertex AI fully qualified identifier for the `Model` resource. - `dedicated_resources`: The compute resources to provision for the batch prediction job. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. - `model_parameters`: Additional filtering parameters for serving prediction results. 
No Additional parameters are supported for custom models. - `input_config`: The input source and format type for the instances to predict. - `instances_format`: The format of the batch prediction request file: `csv` or `jsonl`. - `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests. - `output_config`: The output destination and format for the predictions. - `prediction_format`: The format of the batch prediction response file: `csv` or `jsonl`. - `gcs_destination`: The output destination for the predictions. This call is an asychronous operation. You will print from the response object a few select fields, including: - `name`: The Vertex AI fully qualified identifier assigned to the batch prediction job. - `display_name`: The human readable name for the prediction batch job. - `model`: The Vertex AI fully qualified identifier for the Model resource. - `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability). - `state`: The state of the prediction job (pending, running, etc). Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`. 
```
BATCH_MODEL = "boston_batch-" + TIMESTAMP


def create_batch_prediction_job(display_name, model_name, gcs_source_uri,
                                gcs_destination_output_uri_prefix,
                                parameters=None):
    """Create a Vertex AI batch prediction job with explanations enabled.

    Args:
        display_name: Human readable name for the batch prediction job.
        model_name: Fully qualified Vertex AI identifier of the Model resource.
        gcs_source_uri: Cloud Storage path of the JSONL request file.
        gcs_destination_output_uri_prefix: Cloud Storage prefix for results.
        parameters: Optional dict of model serving parameters.

    Returns:
        The BatchPredictionJob response from the job service.
    """
    # Provision a GPU for serving only if one was configured earlier.
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "model_parameters": json_format.ParseDict(parameters, Value()),
        "input_config": {
            "instances_format": IN_FORMAT,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": OUT_FORMAT,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
        "dedicated_resources": {
            "machine_spec": machine_spec,
            "starting_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES
        },
        # Request feature attributions (explainability) with the predictions.
        'generate_explanation': True
    }
    response = clients['job'].create_batch_prediction_job(
        parent=PARENT, batch_prediction_job=batch_prediction_job
    )
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    # Not every job type exposes this field; a bare `except:` here would also
    # swallow KeyboardInterrupt/SystemExit, so catch only the missing-attribute
    # case.
    try:
        print(" generate_explanation:", response.generate_explanation)
    except AttributeError:
        pass
    print(" state:", response.state)
    print(" create_time:", response.create_time)
    print(" start_time:", response.start_time)
    print(" end_time:", response.end_time)
    print(" update_time:", response.update_time)
    print(" labels:", response.labels)
    return response


IN_FORMAT = 'jsonl'
OUT_FORMAT = 'jsonl'

response = create_batch_prediction_job(BATCH_MODEL, model_to_deploy_id,
                                       gcs_input_uri, BUCKET_NAME)
```

Now get the unique identifier for the batch prediction job you created.
``` # The full unique ID for the batch job batch_job_id = response.name # The short numeric ID for the batch job batch_job_short_id = batch_job_id.split('/')[-1] print(batch_job_id) ``` ### Get information on a batch prediction job Use this helper function `get_batch_prediction_job`, with the following parameter: - `job_name`: The Vertex AI fully qualified identifier for the batch prediction job. The helper function calls the job client service's `get_batch_prediction_job` method, with the following parameter: - `name`: The Vertex AI fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex fully qualified identifier for your batch prediction job -- `batch_job_id` The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`. ``` def get_batch_prediction_job(job_name, silent=False): response = clients['job'].get_batch_prediction_job(name=job_name) if silent: return response.output_config.gcs_destination.output_uri_prefix, response.state print("response") print(" name:", response.name) print(" display_name:", response.display_name) print(" model:", response.model) try: # not all data types support explanations print(" generate_explanation:", response.generate_explanation) except: pass print(" state:", response.state) print(" error:", response.error) gcs_destination = response.output_config.gcs_destination print(" gcs_destination") print(" output_uri_prefix:", gcs_destination.output_uri_prefix) return gcs_destination.output_uri_prefix, response.state predictions, state = get_batch_prediction_job(batch_job_id) ``` ### Get the predictions When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`. Finally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a JSONL format, which you indicated at the time you made the batch prediction job. 
The predictions are in a subdirectory starting with the name `prediction`. Within that subdirectory there is a file named `prediction.results-xxxxx-of-xxxxx`. Now display (cat) the contents. You will see multiple JSON objects, one for each prediction. Finally you view the explanations stored at the Cloud Storage path you set as output. The explanations will be in a JSONL format, which you indicated at the time you made the batch explanation job. The explanations are in a subdirectory starting with the name `prediction`. Within that subdirectory there is a file named `explanation.results-xxxxx-of-xxxxx`. Let's display (cat) the contents. You will see a row for each prediction -- in this case, there is just one row. The row contains: - `dense_input`: The input for the prediction. - `prediction`: The predicted value. ``` def get_latest_predictions(gcs_out_dir): ''' Get the latest prediction subfolder using the timestamp in the subfolder name''' folders = !gsutil ls $gcs_out_dir latest = "" for folder in folders: subfolder = folder.split('/')[-2] if subfolder.startswith('prediction-'): if subfolder > latest: latest = folder[:-1] return latest while True: predictions, state = get_batch_prediction_job(batch_job_id, True) if state != aip.JobState.JOB_STATE_SUCCEEDED: print("The job has not completed:", state) if state == aip.JobState.JOB_STATE_FAILED: break else: folder = get_latest_predictions(predictions) ! gsutil ls $folder/explanation.results* print("Results:") ! gsutil cat $folder/explanation.results* print("Errors:") ! gsutil cat $folder/prediction.errors* break time.sleep(60) ``` # Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex fully qualified identifier for the dataset try: if delete_dataset and 'dataset_id' in globals(): clients['dataset'].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex fully qualified identifier for the pipeline try: if delete_pipeline and 'pipeline_id' in globals(): clients['pipeline'].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex fully qualified identifier for the model try: if delete_model and 'model_to_deploy_id' in globals(): clients['model'].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex fully qualified identifier for the endpoint try: if delete_endpoint and 'endpoint_id' in globals(): clients['endpoint'].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex fully qualified identifier for the batch job try: if delete_batchjob and 'batch_job_id' in globals(): clients['job'].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the Vertex fully qualified identifier for the custom job try: if delete_customjob and 'job_id' in globals(): clients['job'].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and 'hpt_job_id' in globals(): clients['job'].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: 
print(e) if delete_bucket and 'BUCKET_NAME' in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
## 一,优化器的使用 ``` import tensorflow as tf import numpy as np #打印时间分割线 @tf.function def printbar(): ts = tf.timestamp() today_ts = ts%(24*60*60) hour = tf.cast(today_ts//3600+8,tf.int32)%tf.constant(24) minite = tf.cast((today_ts%3600)//60,tf.int32) second = tf.cast(tf.floor(today_ts%60),tf.int32) def timeformat(m): if tf.strings.length(tf.strings.format("{}",m))==1: return(tf.strings.format("0{}",m)) else: return(tf.strings.format("{}",m)) timestring = tf.strings.join([timeformat(hour),timeformat(minite), timeformat(second)],separator = ":") tf.print("=========="*8,end = "") tf.print(timestring) # 求f(x) = a*x**2 + b*x + c的最小值 # 使用optimizer.apply_gradients x = tf.Variable(0.0,name = "x",dtype = tf.float32) optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) @tf.function def minimizef(): a = tf.constant(1.0) b = tf.constant(-2.0) c = tf.constant(1.0) while tf.constant(True): with tf.GradientTape() as tape: y = a*tf.pow(x,2) + b*x + c dy_dx = tape.gradient(y,x) optimizer.apply_gradients(grads_and_vars=[(dy_dx,x)]) #迭代终止条件 if tf.abs(dy_dx)<tf.constant(0.00001): break if tf.math.mod(optimizer.iterations,100)==0: printbar() tf.print("step = ",optimizer.iterations) tf.print("x = ", x) tf.print("") y = a*tf.pow(x,2) + b*x + c return y tf.print("y =",minimizef()) tf.print("x =",x) # 求f(x) = a*x**2 + b*x + c的最小值 # 使用optimizer.minimize x = tf.Variable(0.0,name = "x",dtype = tf.float32) optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) def f(): a = tf.constant(1.0) b = tf.constant(-2.0) c = tf.constant(1.0) y = a*tf.pow(x,2)+b*x+c return(y) @tf.function def train(epoch = 1000): for _ in tf.range(epoch): optimizer.minimize(f,[x]) tf.print("epoch = ",optimizer.iterations) return(f()) train(1000) tf.print("y = ",f()) tf.print("x = ",x) # 求f(x) = a*x**2 + b*x + c的最小值 # 使用model.fit tf.keras.backend.clear_session() class FakeModel(tf.keras.models.Model): def __init__(self,a,b,c): super(FakeModel,self).__init__() self.a = a self.b = b self.c = c def build(self): self.x = 
tf.Variable(0.0,name = "x") self.built = True def call(self,features): loss = self.a*(self.x)**2+self.b*(self.x)+self.c return(tf.ones_like(features)*loss) def myloss(y_true,y_pred): return tf.reduce_mean(y_pred) model = FakeModel(tf.constant(1.0),tf.constant(-2.0),tf.constant(1.0)) model.build() model.summary() model.compile(optimizer = tf.keras.optimizers.SGD(learning_rate=0.01),loss = myloss) history = model.fit(tf.zeros((100,2)), tf.ones(100),batch_size = 1,epochs = 10) #迭代1000次 tf.print("x=",model.x) tf.print("loss=",model(tf.constant(0.0))) ``` ## 二,内置优化器 深度学习优化算法大概经历了 SGD -> SGDM -> NAG ->Adagrad -> Adadelta(RMSprop) -> Adam -> Nadam 这样的发展历程。 在keras.optimizers子模块中,它们基本上都有对应的类的实现。 * SGD, 默认参数为纯SGD, 设置momentum参数不为0实际上变成SGDM, 考虑了一阶动量, 设置 nesterov为True后变成NAG,即 Nesterov Accelerated Gradient,在计算梯度时计算的是向前走一步所在位置的梯度。 * Adagrad, 考虑了二阶动量,对于不同的参数有不同的学习率,即自适应学习率。缺点是学习率单调下降,可能后期学习速率过慢乃至提前停止学习。 * RMSprop, 考虑了二阶动量,对于不同的参数有不同的学习率,即自适应学习率,对Adagrad进行了优化,通过指数平滑只考虑一定窗口内的二阶动量。 * Adadelta, 考虑了二阶动量,与RMSprop类似,但是更加复杂一些,自适应性更强。 * Adam, 同时考虑了一阶动量和二阶动量,可以看成RMSprop上进一步考虑了一阶动量。 * Nadam, 在Adam基础上进一步考虑了 Nesterov Acceleration。
github_jupyter
# Incremental modeling with decision optimization This tutorial includes everything you need to set up decision optimization engines, build a mathematical programming model, then incrementally modify it. You will learn how to: - change coefficients in an expression - add terms in an expression - modify constraints and variables bounds - remove/add constraints - play with relaxations When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_. >This notebook is part of the **[Prescriptive Analytics for Python](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)** >It requires a valid subscription to **Decision Optimization on the Cloud** or a **local installation of CPLEX Optimizers**. Discover us [here](https://developer.ibm.com/docloud) Table of contents: - [Describe the business problem](#Describe-the-business-problem:-Telephone-production) * [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help) * [Use decision optimization](#Use-decision-optimization) * [Step 1: Download the library](#Step-1:-Download-the-library) * [Step 2: Set up the engines](#Step-2:-Set-up-the-prescriptive-engine) * [Step 3: Set up the prescriptive model](#Step-3:-Set-up-the-prescriptive-model) * [Step 4: Modify the model](#Step-4:-Modify-the-model) * [Summary](#Summary) **** ## Describe the business problem: Telephone production A possible descriptive model of the telephone production problem is as follows: * Decision variables: * Number of desk phones produced (DeskProduction) * Number of cellular phones produced (CellProduction) Objective: Maximize profit * Constraints: * The DeskProduction should be greater than or equal to 100. * The CellProduction should be greater than or equal to 100. * The assembly time for DeskProduction plus the assembly time for CellProduction should not exceed 400 hours. 
* The painting time for DeskProduction plus the painting time for CellProduction should not exceed 490 hours. This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP). > **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables. > **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear. For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MP). ## How decision optimization can help * Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. <br/> <u>With prescriptive analytics, you can:</u> * Automate the complex decisions and trade-offs to better manage your limited resources. * Take advantage of a future opportunity or mitigate a future risk. * Proactively update recommendations based on changing events. * Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. 
## Use decision optimization ### Step 1: Download the library Run the following code to install Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier. ``` import docplex check = (docplex.__version__ >= '2.1') if check is False: !conda install -y -c ibmdecisionoptimization docplex ``` A restart of the kernel might be needed. ### Step 2: Set up the prescriptive engine * Subscribe to the [Decision Optimization on Cloud solve service](https://developer.ibm.com/docloud). * Get the service URL and your personal API key. ``` from docplex.mp.model import * SVC_URL = "ENTER YOUR URL HERE" SVC_KEY = "ENTER YOUR KEY HERE" ``` ### Step 3: Set up the prescriptive model #### Writing a mathematical model Convert the descriptive model into a mathematical model: * Use the two decision variables DeskProduction and CellProduction * Use the data given in the problem description (remember to convert minutes to hours where appropriate) * Write the objective as a mathematical expression * Write the constraints as mathematical expressions (use “=”, “<=”, or “>=”, and name the constraints to describe their purpose) * Define the domain for the decision variables #### Telephone production: a mathematical model To express the last two constraints, we model assembly time and painting time as linear combinations of the two productions, resulting in the following mathematical model: <code>maximize: 12 desk_production+20 cell_production subject to: desk_production>=100 cell_production>=100 0.2 desk_production+0.4 cell_production<=400 0.5 desk_production+0.4 cell_production<=490 </code> ``` # first import the Model class from docplex.mp from docplex.mp.model import Model # create one model instance, with a name m = Model(name='telephone_production') ``` The continuous variable desk represents the production of desk telephones. 
The continuous variable cell represents the production of cell phones. ``` # by default, all variables in Docplex have a lower bound of 0 and infinite upper bound desk = m.integer_var(name='desk') cell = m.integer_var(name='cell') m.maximize(12 * desk + 20 * cell) # write constraints # constraint #1: desk production is greater than 100 m.add_constraint(desk >= 100, "desk") # constraint #2: cell production is greater than 100 m.add_constraint(cell >= 100, "cell") # constraint #3: assembly time limit ct_assembly = m.add_constraint( 0.2 * desk + 0.4 * cell <= 400, "assembly_limit") # constraint #4: painting time limit ct_painting = m.add_constraint( 0.5 * desk + 0.4 * cell <= 490, "painting_limit") ``` #### Solve with Decision Optimization solve service If url and key are None, the Modeling layer will look for a local runtime, otherwise will use the credentials. Look at the documentation for a good understanding of the various solving/generation modes. If you're using a Community Edition of CPLEX runtimes, depending on the size of the problem, the solve stage may fail and will need a paying subscription or product installation. You will get the best solution found after ***n*** seconds, thanks to a time limit parameter. ``` m.print_information() msol = m.solve(url=SVC_URL, key=SVC_KEY) assert msol is not None, "model can't solve" m.print_solution() ``` ### Step 4: Modify the model #### Modify constraints and variables bounds The model object provides getters to retrieve variables and constraints by name: * get_var_by_name * get_constraint_by_name The variable and constraint objects both provide properties to access the right hand side (rhs) and left hand side (lhs). When you modify a rhs or lhs of a variable, you of course need to give a number. When you modify a rhs or lhs of a constraint, you can give a number or an expression based on variables. Let's say we want to build 2000 cells and 1000 desks maximum. 
And let's say we want to increase the production of both of them from 100 to 350 ``` # Access by name m.get_var_by_name("desk").ub = 2000 # access via the object cell.ub = 1000 m.get_constraint_by_name("desk").rhs = 350 m.get_constraint_by_name("cell").rhs = 350 msol = m.solve(url=SVC_URL, key=SVC_KEY) assert msol is not None, "model can't solve" m.print_solution() ``` The production plan has been updated according to our small changes. #### Modify expressions We now want to introduce a new type of product: the "hybrid" telephone. ``` hybrid = m.integer_var(name='hybrid') ``` We need to: - introduce it in the objective - introduce it in the existing painting and assembly time constraints - add a new constraint for its production to produce at least 350 of them. ``` m.add_constraint(hybrid >= 350) ; ``` The objective will move from <code> maximize: 12 desk_production+20 cell_production </code> to <code> maximize: 12 desk_production+20 cell_production + 10 hybrid_production </code> ``` m.get_objective_expr().add_term(hybrid, 10) ; ``` The time constraints will be updated from <code> 0.2 desk_production+0.4 cell_production<=400 0.5 desk_production+0.4 cell_production<=490 </code> to <code> 0.2 desk_production+0.4 cell_production + 0.2 hybrid_production<=400 0.5 desk_production+0.4 cell_production + 0.2 hybrid_production<=490 </code> When you add a constraint to a model, its object is returned to you by the method add_constraint. 
If you don't have it, you can access it via its name ``` m.get_constraint_by_name("assembly_limit").lhs.add_term(hybrid, 0.2) ct_painting.lhs.add_term(hybrid, 0.2) ; ``` We can now compute the new production plan for our 3 products ``` msol = m.solve(url=SVC_URL, key=SVC_KEY) assert msol is not None, "model can't solve" m.print_solution() ``` Let's now say we improved our painting process, the distribution of the coefficients in the painting limits is not [0.5, 0.4, 0.2] anymore but [0.1, 0.1, 0.1] When you have the hand on an expression, you can modify the coefficient variable by variable with set_coefficient or via a list of (variable, coeff) with set_coefficients ``` ct_painting.lhs.set_coefficients([(desk, 0.1), (cell, 0.1), (hybrid, 0.1)]) msol = m.solve(url=SVC_URL, key=SVC_KEY) assert msol is not None, "model can't solve" m.print_solution() ``` #### Relaxations Let's now introduce a new constraint: polishing time limit. ``` # constraint: polishing time limit ct_polishing = m.add_constraint( 0.6 * desk + 0.6 * cell + 0.3 * hybrid <= 290, "polishing_limit") msol = m.solve(url=SVC_URL, key=SVC_KEY) if msol is None: print("model can't solve") ``` The model is now infeasible. We need to handle it and dig into the infeasibilities. You can now use the Relaxer object. You can control the way it will relax the constraints or you can use 1 of the various automatic modes: - 'all' relaxes all constraints using a MEDIUM priority; this is the default. - 'named' relaxes all constraints with a user name but not the others. - 'match' looks for priority names within constraint names; unnamed constraints are not relaxed. We will use the 'match' mode. Polishing constraint is mandatory. Painting constraint is a nice to have. Assembly constraint has low priority. 
``` ct_polishing.name = "high_"+ct_polishing.name ct_assembly.name = "low_"+ct_assembly.name ct_painting.name = "medium_"+ct_painting.name # if a name contains "low", it has priority LOW # if a ct name contains "medium" it has priority MEDIUM # same for HIGH # if a constraint has no name or does not match any, it is not relaxable. from docplex.mp.relaxer import Relaxer relaxer = Relaxer(prioritizer='match', verbose=True) relaxed_sol = relaxer.relax(m, url=SVC_URL, key=SVC_KEY) relaxed_ok = relaxed_sol is not None assert relaxed_ok, "relaxation failed" relaxer.print_information() m.print_solution() ct_polishing_relax = relaxer.get_relaxation(ct_polishing) print("* found slack of {0} for polish ct".format(ct_polishing_relax)) ct_polishing.rhs+= ct_polishing_relax m.solve(url=SVC_URL, key=SVC_KEY) m.report() m.print_solution() ``` ## Summary You learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to formulate a Mathematical Programming model and modify it in various ways. #### References * [Decision Optimization CPLEX Modeling for Python documentation](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html) * [Decision Optimization on Cloud](https://developer.ibm.com/docloud/) * Need help with DOcplex or to report a bug? Please go [here](https://developer.ibm.com/answers/smartspace/docloud) * Contact us at dofeedback@wwpdl.vnet.ibm.com" Copyright © 2017 IBM. Sample Materials.
github_jupyter
``` %matplotlib inline import os import sys # Modify the path sys.path.append("..") import yellowbrick as yb import matplotlib.pyplot as plt ``` # Using Yellowbrick to Explore Book Reviews This notebook is for the Yellowbrick user study. About the data: [Amazon book reviews Data Set](http://archive.ics.uci.edu/ml/datasets/Amazon+book+reviews) Abstract: 213,335 book reviews for 8 different books. Source: Ahmet Taspinar, info '@' ataspinar.com, http://ataspinar.com Data Set Information: - Gone Girl: 41,974 - The Girl on the Train: 37,139 - The Fault in our Stars: 35,844 - Fifty Shades of Grey: 32,977 - Unbroken: 25,876 - The hunger games: 24,027 - The Goldfinch: 22,861 - The Martian: 22,571 Attribute Information: Each entry is separated by a newline character. Each entry contains four attributes, which are separated by a space: 1. review score 2. tail of review url 3. review title 4. HTML of review text After [downloading the data](http://archive.ics.uci.edu/ml/machine-learning-databases/00370/amazon_book_reviews.rar) in .rar archive form, I unpacked it with `unrar`: _(if you don't have unrar)_ $ brew install unrar # or use apt-get or yum, depending on your system $ urar e amazon_book_reviews.rar The result is the following 8 csv files and a metadata.txt file: - Andy-Weir-The-Martian.csv - Laura-Hillenbrand-Unbroken.csv - Donna-Tartt-The-Goldfinch.csv - Paula_Hawkins-The-Girl-On-The-Train.csv - EL-James-Fifty-Shades-of-Grey.csv - Suzanne-Collins-The-Hunger-Games.csv - Fillian_Flynn-Gone_Girl.csv - John-Green-The-Fault-in-our-Stars.csv - metadata.txt ``` from sklearn.datasets.base import Bunch ## The path to the test data sets FIXTURES = os.path.join(os.getcwd(), "data") ## Dataset loading mechanisms datasets = { "reviews": os.path.join(FIXTURES, "reviews") } def load_data(name, download=True): """ Loads and wrangles the passed in text corpus by name. If download is specified, this method will download any missing files. 
""" # Get the path from the datasets path = datasets[name] # Read the files in the directory as the categories. categories = [ os.path.splitext(f)[0] for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and os.path.join(path, f).endswith(".csv") ] files = [] # holds the file names relative to the root data = [] # holds the text read from the file target = [] # holds the string of the category # Load the data from the files in the corpus for cat in categories: files.append(os.path.join(path, cat + '.csv')) with open(os.path.join(path, cat + '.csv'), 'r') as f: content = f.read() docs = [s.strip() for s in content.splitlines()] for doc in docs[:1000]: # limited size so nb won't crash data.append(doc) target.append(cat) # Return the data bunch for use similar to the newsgroups example return Bunch( categories=categories, files=files, data=data, target=target, ) corpus = load_data('reviews') ``` ### Visualizing Stopwords Removal How much does stopwords removal impact a corpus of book reviews? To visualize the transformation, we can compare the results before and after stopwords have been removed from the corpus using the Yellowbrick `FreqDistVisualizer`: ``` from yellowbrick.text.freqdist import FreqDistVisualizer from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer() docs = vectorizer.fit_transform(corpus.data) features = vectorizer.get_feature_names() visualizer = FreqDistVisualizer() visualizer.fit(docs, features) visualizer.show() vectorizer = CountVectorizer(stop_words='english') docs = vectorizer.fit_transform(corpus.data) features = vectorizer.get_feature_names() visualizer = FreqDistVisualizer() visualizer.fit(docs, features) visualizer.show() ``` ### Visualizing tokens across corpora It is also interesting to explore the differences in tokens across a corpus. For example, do people say different things in reviews about books by men vs. books by women? 
``` male = ['Andy-Weir-The-Martian', 'John-Green-The-Fault-in-our-Stars'] female = ['Laura-Hillenbrand-Unbroken', 'Paula_Hawkins-The-Girl-On-The-Train', 'Suzanne-Collins-The-Hunger-Games', 'Donna-Tartt-The-Goldfinch', 'EL-James-Fifty-Shades-of-Grey', 'Fillian_Flynn-Gone_Girl'] male_author_reviews = [] female_author_reviews = [] for book in male: for idx in range(len(corpus.data)): if corpus.target[idx] == book: male_author_reviews.append(corpus.data[idx]) for book in female: for idx in range(len(corpus.data)): if corpus.target[idx] == book: female_author_reviews.append(corpus.data[idx]) vectorizer = CountVectorizer(stop_words='english') docs = vectorizer.fit_transform(text for text in female_author_reviews) features = vectorizer.get_feature_names() visualizer = FreqDistVisualizer() visualizer.fit(docs, features) visualizer.show() vectorizer = CountVectorizer(stop_words='english') docs = vectorizer.fit_transform(text for text in male_author_reviews) features = vectorizer.get_feature_names() visualizer = FreqDistVisualizer() visualizer.fit(docs, features) visualizer.show() ``` ## t-SNE: Corpus Visualization What patterns can we see if we project the book reviews into 2 dimensional space? ``` from yellowbrick.text import TSNEVisualizer from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer() docs = tfidf.fit_transform(corpus.data) labels = corpus.target # Create the visualizer and draw the vectors tsne = TSNEVisualizer() tsne.fit(docs, labels) tsne.show() # Only visualize the books by female authors tsne = TSNEVisualizer(classes=female) tsne.fit(docs, labels) tsne.show() # Only visualize the books by male authors tsne = TSNEVisualizer(classes=male) tsne.fit(docs, labels) tsne.show() ```
github_jupyter
# Results - CIViC smMIPs panel rescues clinically relevant variants ## Tools ``` #!/usr/bin/env python3 import numpy as np import pandas as pd import matplotlib.pyplot as plt import glob import scipy.stats as ss import seaborn as sns sns.set(style='white') sns.set_context("talk") from pyliftover import LiftOver lo = LiftOver('hg19', 'hg38') li = LiftOver('hg38', 'hg19') ``` ## Pull in Input Files ``` overlap_with_smmips = pd.read_csv('../output/variant_overlap2.tsv', sep='\t') samples_QC = pd.read_csv('../data/validation_samples/sequencing_quality_check.txt', sep='\t') sample_info = pd.read_csv('../data/validation_samples/sample_dataframe.txt', sep='\t') WEX_variants = pd.read_csv('../data/original_sequencing/VCF_exome.txt', sep='\t') for i,row in WEX_variants.iterrows(): chrom = str('chr' + str(row['chromosome_name'])) start = int(row['start']) stop = int(row['stop']) if row['genome'] == 38: if li.convert_coordinate(chrom, start): start_new = li.convert_coordinate(chrom, start) stop_new = li.convert_coordinate(chrom, stop) WEX_variants.loc[i, 'start'] = start_new[0][1] WEX_variants.loc[i, 'stop'] = stop_new[0][1] WEX_variants.loc[i, 'genome'] = 37 WEX_variants = WEX_variants.filter(items=['chromosome_name','start','stop','reference', 'variant', 'sample']) WEX_variants.columns = ['chrom', 'start', 'stop', 'reference', 'variant', 'sample'] for i,row in WEX_variants.iterrows(): chrom = str('chr' + str(row['chrom'])) WEX_variants.loc[i, 'chrom'] = chrom ``` ## smMIPs variant rescue for samples with both tumor and matched normal ``` overlap_with_smmips_data = overlap_with_smmips.merge(sample_info, right_on='Sample', left_on='sample') tumor_normal_samples = [] for item in overlap_with_smmips_data[(overlap_with_smmips_data['Passed QC'] == 'yes') & (overlap_with_smmips_data['Matched Normal'] == 'yes')]['sample'].drop_duplicates(): tumor_normal_samples.append(item) def find_somatic_variants(name): print(name) to_iterate = [] for item in 
glob.glob('../data/smmips_sequencing/*.vcf'): file_name = item.split('/')[-1].split('_')[0] +'_' + item.split('/')[-1].split('_')[1] if name == file_name: to_iterate.append(item) if len(to_iterate) != 4: print("Normal sequencing Failed for " + name + " ... skiping this sample") print() return 0,0 else: tumor_variants = pd.DataFrame() normal_variants = pd.DataFrame() for item in to_iterate: current = pd.read_csv(item, sep='\t', comment='#', header=None).filter(items=[0,1,1,3,4]) if len(current) > 1: if item.split('_')[3].split('.')[0] == 'T': tumor_variants = tumor_variants.append(current) if item.split('_')[3].split('.')[0] == 'N': normal_variants = normal_variants.append(current) tumor_variants.columns = ['chrom', 'start', 'stop', 'reference', 'variant'] normal_variants.columns = ['chrom', 'start', 'stop', 'reference', 'variant'] print('Total tumor varinats: ', len(tumor_variants)) somatic = tumor_variants.merge(normal_variants, how='outer', indicator=True) somatic = somatic[somatic['_merge'] == 'left_only'].drop('_merge', axis=1) already_found = WEX_variants[WEX_variants['sample'] == name].drop('sample', axis=1) somatic = somatic.merge(already_found, how='outer', indicator=True) somatic = somatic[somatic['_merge'] == 'left_only'] somatic.drop('_merge', axis=1).to_csv('../data/manual_review/' + name +'.bed.txt',sep='\t',index=False) print('Total somatic varinats: ', len(somatic)) print() return 1, int(len(somatic)) print() eligible_samples = 0 total_variants = 0 for item in tumor_normal_samples: sample, somatic_variants = find_somatic_variants(item) eligible_samples += sample total_variants +=somatic_variants print('Total samples with paired tumor/normal sequencing: ', eligible_samples) print('Total stomatic variants identified exclusively on smMIPs: ', total_variants) ``` # Determine Cause of Missed Variants ``` manual_review_dataframe = pd.DataFrame() for item in glob.glob('../data/manual_review/*_postMR.bed.txt'): name = 
item.split('/')[3].split('.')[0].strip('_postMR') current_df = pd.read_csv(item,sep='\t') current_df['Sample'] = name manual_review_dataframe = manual_review_dataframe.append(current_df) smMIPs_somatic = pd.DataFrame() for name in tumor_normal_samples: to_iterate = [] for item in glob.glob('../data/smmips_sequencing/*.vcf'): file_name = item.split('/')[-1].split('_')[0] +'_' + item.split('/')[-1].split('_')[1] if name == file_name: to_iterate.append(item) if len(to_iterate) != 4: continue else: tumor_variants = pd.DataFrame() normal_variants = pd.DataFrame() for item in to_iterate: current = pd.read_csv(item, sep='\t', comment='#', header=None).filter(items=[0,1,1,3,4,9]) if len(current) > 1: if item.split('_')[3].split('.')[0] == 'T': tumor_variants = tumor_variants.append(current) if item.split('_')[3].split('.')[0] == 'N': normal_variants = normal_variants.append(current) tumor_variants.columns = ['chrom', 'start', 'stop', 'reference', 'variant','GT:FALT:FTOT:RALT:RTOT:ALT:TOT:FRAC'] normal_variants.columns = ['chrom', 'start', 'stop', 'reference', 'variant','GT:FALT:FTOT:RALT:RTOT:ALT:TOT:FRAC'] somatic = tumor_variants.merge(normal_variants, how='outer', indicator=True) somatic = somatic[somatic['_merge'] == 'left_only'] somatic['Sample'] = name smMIPs_somatic = smMIPs_somatic.append(somatic) smMIPs_somatic['GT'], smMIPs_somatic['FALT'], smMIPs_somatic['FTOT'], smMIPs_somatic['RALT'], smMIPs_somatic['RTOT'], smMIPs_somatic['ALT'],smMIPs_somatic['TOT'],smMIPs_somatic['FRAC'] = smMIPs_somatic['GT:FALT:FTOT:RALT:RTOT:ALT:TOT:FRAC'].str.split(':', 8).str smMIPs_somatic = smMIPs_somatic.drop(['GT:FALT:FTOT:RALT:RTOT:ALT:TOT:FRAC', '_merge'], axis=1) manual_review_dataframe_merge = manual_review_dataframe.merge(smMIPs_somatic, how='outer', left_on=['Chromosome', 'Start', 'Stop', 'Reference', 'Variant', 'Sample'], right_on=['chrom', 'start', 'stop', 'reference', 'variant', 'Sample'],indicator=True) manual_review_dataframe_merge = 
manual_review_dataframe_merge[manual_review_dataframe_merge['_merge'] == 'both'] manual_review_dataframe_merge.to_csv('../data/manual_review/manual_review_matrix.txt', sep='\t', index=False) len(manual_review_dataframe_merge) ``` ## Analysis of germline variants ``` #GERMLINE VARIANTS print('Total Germline Variants: ',len(manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'Germline'])) print(len(manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'Germline'])/len(manual_review_dataframe_merge)*100,'%') germline_variants = manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'Germline'] plt.figure(figsize=(5,5)) ax = sns.distplot(pd.to_numeric(germline_variants['FRAC'])*100, bins=15, color='#376E6F') ax.set_xlim([0, 100]) plt.xlabel('Original VAF') plt.ylabel('Frequency') plt.savefig('../data/Figures/Recovery_germline.pdf', bbox_inches='tight', dpi=400) plt.show() plt.close() ``` ## Analysis of variants with sequencing artifacts ``` #SEQUENCING ARTIFACTS artifacts = manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'Sequencing artifact'] print('Total Sequencing Artifact Variants: ',len(artifacts)) print(len(artifacts)/len(manual_review_dataframe)*100,'%') tags_list = {'MN':'#0E1A1A', 'MM':'#152627', 'DN':'#1C3334', 'D':'#495C5D', 'MV':'#778585', 'TR':'#A4ADAE', } tags = [] for item in artifacts['Tags'].values: for item in item.split(','): tags.append(item) plt.figure(figsize=(5,5)) ax = sns.countplot(x=tags, palette=tags_list, order=['MN', 'MM', 'DN', 'MV', 'TR']) # ax.set_xticklabels(['Mononucleotide', 'Multiple Mismatches', 'Dinucleotide', 'Multiple Variants', 'Tandem Repeat']) plt.xlabel('Artifact Tag') plt.ylabel('Count') plt.savefig('../data/Figures/Recovery_artifact.pdf', bbox_inches='tight', dpi=400) plt.show() plt.close() ``` ## Analysis of variants not called as somatic on original sequencing ``` other = len(manual_review_dataframe) - (len(germline_variants) + len(artifacts)) 
print('Total Variants not called as somatic by original sequencing: ',other) print(other/len(manual_review_dataframe)*100,'%') ``` ### Analysis of variants with no support on original sequencing ``` not_observed = manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'No support'] print('Total Variants not observed on original sequencing: ',len(not_observed)) print(len(not_observed)/len(manual_review_dataframe)*100,'%') np.median((pd.to_numeric(not_observed['FRAC']))*100) binomial = 0 total = 0 binomial_name = [] for i,row in not_observed.iterrows(): total +=1 smMIPs_VAF = float(row['FRAC']) original_tumor_coverage = float(row['tumor coverage']) n = original_tumor_coverage p = smMIPs_VAF k = 3 hh = ss.binom(n, p) if hh.cdf(k) > 0.95: binomial +=1 binomial_name.append('>95%') else: binomial_name.append('<95%') not_observed['Binomial Stat'] = binomial_name print('Variants with insufficient coverage: ' + str(binomial)) print('Frequency of Variants with insufficient coverage: ' + str(binomial/total)) binom_list = {'>95%':'#0E1A1A', '<95%':'#2E151B'} plt.figure(figsize=(5,5)) ax = sns.scatterplot(x="tumor coverage", y=pd.to_numeric(not_observed["FRAC"])*100, data=not_observed, alpha=0.8, hue='Binomial Stat', palette=binom_list, legend=False) ax.set_ylim([0, 4]) ax.set_xlim([0, 310]) plt.xlabel('Original Coverage') plt.ylabel('smMIPs VAF (%)') plt.savefig('../data/Figures/Recovery_no-support.pdf', bbox_inches='tight', dpi=400) plt.show() plt.close() ``` ### Analysis of variants with support on original sequencing ``` original_support = manual_review_dataframe_merge[manual_review_dataframe_merge['Notes'] == 'Support'] print('Total Variants not observed on original sequencing: ',len(original_support)) print(len(original_support)/len(manual_review_dataframe)*100,'%') smmips_fraction = [] for item in original_support['FRAC']: smmips_fraction.append(float(item) * 100) original_support['smMIPs VAF'] = smmips_fraction correlation_original = 
original_support.filter(items=['original_VAF', 'smMIPs VAF']) pd.DataFrame.corr(correlation_original,method='pearson') plt.figure(figsize=(5,5)) sns.scatterplot(x="original_VAF", y="smMIPs VAF", data=original_support, alpha=0.8, color='#295353') plt.xlabel('Original VAF (%)') plt.ylabel('smMIPs VAF (%)') plt.savefig('../data/Figures/Recovery_support.pdf', bbox_inches='tight', dpi=400) plt.show() plt.close() original_support.to_csv('../output/recovered_supported_variants.tsv', sep='\t', index=False) ``` #### Evaluate manual review of supported variants ``` analysis = pd.read_csv('../output/analysis_of_original_MR.txt', sep='\t') analysis.groupby('Notes').size() ```
github_jupyter
# Table of Contents 1. The competition 2. Summary of my approach and results 3. Algorithm details 4. Instructions for running my code 5. What I've learned about competing # 1. The competition In the [Kaggle Passenger Description Algorithm Challenge](https://www.kaggle.com/c/passenger-screening-algorithm-challenge), competitors were asked to identify and locate hidden threats in millimeter-wave 3D body scans. A typical scan had 0-3 threats distributed among 17 predefined body zones. The train and test datasets were each comprised of about 1200 scans. Multiple image formats were provided, from 2D datasets that were order 10GB in size, to 3D datsets that were order 1TB in size. Competitors were asked to predict the probability of threats in all 17 body zones for each scan. # 2. Summary of my approach and results You can download [my complete solution on github](https://github.com/naterm60/KagglePassengerScreening). Instructions for running it can be found in section 4 of this writeup. Note that this is my first full Python project, first project in the cloud, and my first use of transfer learning. Any suggestions are welcome. My background includes math/physics, optimal methods, and extensive use of Mathematica, along with some software engineering and high performance computing. I chose to develop a way of representing a passenger's body surface in 2D, since it's topologically 2D to begin with. I made the 2D representation independent of body type and pose, so that body zones were in dependable positions in the output images. Image channels corresponded to surface characteristics, as describe in the section "Algorithm details". Next, threats needed to be detected. I planned to start simple and progress to more complex approaches as I learned more about deep CNN's. The very simplest approach was to get color histograms of body zones and then identify threats with a binary classifier. Next, I would try the simplest type of transfer learning. 
The ultimate approach, I thought, would be to generate hand-drawn masks for the entire train dataset, train a network to duplicate the masks, and then train some top layers for threat identification and localization. My simplest approach (color histograms + logistic regression) worked surprisingly well in stage 1, putting me in the top 20% of contestants. So even without using shape and texture information in the processed images, the technique performed well. I ran out of time because this was my first project done fully in Python, I started with only 2 months to go, and I had no CNN experience. I made late submissions after the competition closed, and found that the color histogram approach did not generalize at all to stage 2 data. Transfer learning did generalize, however, giving a score of 0.22494, which would have been in the bronze medal range. # 3. Algorithm details I'll now discuss my algorithm in more detail. For full detail, refer to the code. I did something a bit different from most competitors. Using .a3d files as input, I "unwrapped" a passenger's body surface into a 2-dimensional representation using a cylindrical coordinate system for each body-part. The 7 body-parts were the 2 legs, 1 trunk, 2 biceps, and 2 forearms, as drawn in Figure 1. A cylinder's axis was allowed to curve but slices were parallel to one another. ![alt text](images/body.png) <center>**Figure 1.** The seven body parts and the assumed coordinate frame</center> Cylindrical coordinate systems were registered to body-parts using estimates of the positions of wrists, elbows, shoulders, feet, the buttock/leg meeting point, and the center of mass. Shoulder height was estimated as follows. Scans were understood to be in the standard reference frame, with the first array dimension being x, the second y, and the third z, with the passenger placed as shown in Figure 1. The y dimension was averaged over, binarized, and negated, yielding an array like Figure 2.
Summing over the y dimension of this array gives a list like the one plotted in Figure 3. Shoulder height was determined by finding the first list element with a value greater than a threshold value. ![alt text](images/shoulder1.png) <center>**Figure 2.** Shoulder region of a passenger, summed over the y dimension, binarized, and negated.</center> <br><br> ![alt text](images/shoulder2.png) <center>**Figure 3.** The array in Figure 2, summed over the z axis.</center> Groin height was estimated as follows. A scan was averaged along the x direction and then binarized. A side profile of the buttock was found by getting, for each z coordinate, the largest y coordinate with array value over a threshold. This list of y coordinates is shown in Figure 4. Groin height was determined from this list by taking the derivative and finding the position of the maximum. This method proved to be much more reliable than direct methods of determining groin height. More direct methods failed due to the the effect of sexual organs, contraband, and thigh gap variation. ![alt text](images/buttock.png) <center>**Figure 4.** Side profile of a buttock.</center> The positions of elbows and wrists were determined using techniques similar to those used for estimating groin and shoulder height. The head was erased prior to elbow and wrist position estimation to prevent interference. A cylindrical coordinate system was then fitted to each body part. This representation of a body segment's geometry was called a "waffle stack", or just "waffles", and is depicted in Figure 5. It was determined as follows. First a basis (**u**, **v**, **w**) was chosen for each body part. There was no rotation for the legs and trunk, so (**u**, **v**, **w**) = (**x**, **y**, **z**) for the legs and trunk. Basii for arm segments were determined using the positions of shoulders, elbows, and wrists. Arm segments were rotated volumetrically to align basii with array axes. 
For all the body segments, ellipses were then fitted at about 100 positions along the **w** direction. Fitting an ellipse involved determining the offsets and axis lengths in the (**u**, **v**) plane. Ellipse axes were aligned with the **u** and **v** axes. The lists of ellipse offsets and ellipse axis lengths were median filtered to remove spikes and then Savitzky-Golay smoothed. ![alt text](images/legWaffles.png) <center>**Figure 5.** Two types of waffle stacks.</center> Each body part was transformed to a cylindrical coordinate representation using that body part's waffle stack. This gave an array with dimensions (r, &theta;, w), where r is radius, &theta; is the angular coordinate, and w is the axial coordinate. For each (&theta;, w) coordinate, I found the peaks along the r direction. The tallest peak's height, position, and width are the surface reflectivity, radius (AKA surface height), and thickness, respectively, for coordinate (&theta;, w). That's not a precise description, by the way, since the determination of peak height, position and width (i.e. zeroth, first and second moments) needs to be tweaked to give good results. Anyway, I end up with surface reflectivity, radius and thickness for each (&theta;, w) coordinate. This can be represented as an image with coordinates (&theta;, w) and 3 channels containing the reflectivity, radius and thickness. All 7 body-part images were joined into one image and saved. The entire process reduced a 330GB volumetric dataset to 500MB, a 660 times decrease in size. Throughput was 1 scan every 35 seconds. Although 35s is too slow for passenger screening, the algorithm is not parallelized at all. The body segments could be processed in parallel for a 7x speedup, and waffles could be processed in parallel for another order 10x speedup. I call the combined image a "body image". Each body zone defines a rectangular region of the body image. For each scan and each body zone, the zone's rectangular region was extracted.
(The sensitive area, region 9, actually consisted of 4 rectangular regions: 2 on the trunk and 1 on each leg. These regions were extracted and joined into a single image.) For each body zone, PCA was performed on all the images that did not contain threats. Projection of an image into the subspace defined by the top few principal components allowed estimation of an image's "background", or the normal non-threat variation. Subtraction of this variation removed differences due to weight, gender, pose, etc. Threats pop out in the resulting images as areas of saturated color on a gray backghround. I would show the images if I could, but competition rules don't permit it. All the background-subtracted images were resized to (139,139) and fed to InceptionV3 for bottleneck feature extraction. The resulting 3x3x2048 vectors were average pooled, yielding a vector of length 2048 per image. These vectors and the known threat probabilities were used to fit a logistic regression binary classifier. # 4. Instructions for running my code These instructions assume that you were a competitor and have access to the competition's bucket. After the competition closes you won't have access to the dataset, but you can still look at the code. 1. Copy stage1_a3d and stage2_a3d to your own bucket. 2. Create a Google Cloud Compute Engine VM instance. I used 8vCPUs, 52GB RAM. 3. Install Google Cloud Datalab on the VM. 4. Download my [github repository](https://github.com/naterm60/KagglePassengerScreening) 5. Upload my project to a folder on your Datalab VM 6. Create the following empty subfolders within the project folder: embedded2D/stage1/, embedded2D/stage2/, highlight/stage1/, log/. 7. Upload these competition files to the project folder: stage1_labels.csv, stage1_sample_submission.csv, stage1_solution.csv, stage2_sample_submission.csv 8. Open the notebook embed2D.ipynb 9. In the section titled "Project-specific imports and initializations", change the bucket name to your own bucket. 10. 
Run the entire notebook, using python 3. It takes about 30 hours to embed all the stage 1 and 2 scans. 11. Open the notebook transferLearning.ipynb 12. Run all the cells in transferLearning.ipynb, which takes about an hour. # 5. What I've learned about competing For whatever it's worth, here's what I've learned to do to place well in competitions: 1. Approach competitions with a "must-win" attitude. Ruthlessly make the best choices in each area of a model and learn technologies where necessary. 2. Prefer general-purpose existing tools over large, custom codebases developed for fun. There's nothing wrong with the latter if one is aiming for a discovery, but it's not a good strategy for placing well. 3. Take a sink-or-swim approach. I have probably learned more by throwing myself into actual competitions than I could have learned by wading in practice problems. Practice problems are great if you're absolutely new to coding or data science. But if you have experience with math, coding, and crunching data, it's better to skip the pedantic approach. Live competitions are far more motivating and entertaining, and you can still look at practice problems and tutorials for guidance. 4. Do extensive exploratory data analysis. Create methods for finding and investigating a model's worst deficiencies. 5. Start working as soon as a competition is announced. This gives more time to think of improved approaches. Hopefully, one could be finished halfway through the competition, with months remaining to tweak the model. 6. Get a prototype running as quickly as possible, and start making submissions. 7. Do everything to make the cross-validation (CV) train vs. test difference similar to the stage 1 vs. stage 2 difference. Rely exclusively on CV to check a model's performance, rather than using the 1st stage leaderboard for that. In this competition I did well with items 1 and 3 above. 
In the [Web Traffic Time Series Forecasting Competition](https://www.kaggle.com/c/web-traffic-time-series-forecasting), where I placed 5th and won gold, I did well with everything except item 5.
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Cost-vs-qubits-size" data-toc-modified-id="Cost-vs-qubits-size-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Cost vs qubits size</a></span></li></ul></div> ``` import numpy as np import networkx as nx from loguru import logger as log import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm import copy import sys sys.path.append('..') sns.set_style('whitegrid') import qtree import utils import utils_qaoa as qaoa import utils_mproc as mputils %load_ext autoreload %autoreload 2 ``` ## Cost vs qubits size ``` def log_log_scale(): plt.yscale('log') plt.xscale('log') def minorticks(): plt.minorticks_on() plt.grid(which='minor', alpha=0.3, linestyle='-', axis='both') def get_est(xs, vals): mem_est = np.polyfit(xs, np.log(vals), 2) mem_est = np.poly1d(mem_est) est = np.linspace(20,2e2, 100) mem_est = mem_est(est) return est, np.exp(mem_est) sizes = np.arange(13,54,2) results = [ qaoa.get_cost_of_task(s, 1, type='randomreg',degree=3) for s in sizes ] def plot_theory(results, ns): sums = [(max(y[0]), sum(y[1])) for y in results] colors = [plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,2)] memsums, flopsums = zip(*sums) est, mem_est = get_est(ns, memsums) est, flop_est = get_est(ns, flopsums) plt.plot(ns, flopsums, label='total FLOP', color=colors[1]) plt.plot(ns, np.array(memsums), label='maximum Memory', color=colors[0]) #plt.plot(est, mem_est, '--', label='mem log-log fit') #plt.plot(est, flop_est, '--', label='flop log-log fit') plt.xlabel('Number of qbits') plt.yscale('log') #plt.xscale('log') #plt.suptitle('QAOA one amplitude simulation cost', fontsize=14) #plt.title('MaxCut random regular graphs') plt.legend() plt.minorticks_on() plt.grid(which='minor', alpha=0.3, linestyle='-', axis='both') #ax = plt.gca().twinx() #plt.grid(None) #plt.plot(ns, nghssums, label='max ng', color='red') import glob import json thread_folders = 
sorted(glob.glob('./contract_data/contr_profile_*thr')) print(thread_folders) thread_files = [sorted(glob.glob(folder+'/*.json')) for folder in thread_folders] print(list(map(len, thread_files))) thread_exps = [[json.load(open(f)) for f in files] for files in thread_files] exp_results = [ (max(e['proc_buck memory']) ,1e9*np.array(e['proc_buck time']).sum() ) for exps in thread_exps for e in exps ] print(len(exp_results)) sizes_exp = range(13,49,2) threads_exp = [1, 16] exp_results = np.array(exp_results).reshape(len(thread_exps), len(sizes_exp), 2) print(exp_results.shape) ns = list(zip(*results))[3] #plot_theory(results, ns) print(exp_results[0,:,1].shape) plt.plot( exp_results[0,:,1]) plt.plot( exp_results[1,:,1]) result_rows = list(zip(*results)) plt.plot( list(map(sum, result_rows[1]))[:-2]) plt.yscale('log') plt.savefig('figures/cost_vs_taskS_42d3.pdf') plt.plot(thread_exps[0][-1]['proc_buck time']) plt.yscale('log') total_data = json.load(open('./contract_data/contr_profile_total_13_49_2_42d3.json')) sim_sum = total_data['Total_sim'] new_data = json.load(open('./contract_data/contr_profile_data47_42d3.json')) single_threaded_time = new_data['proc_buck time'] single_threaded_mem = new_data['proc_buck memory'] list(sizes_exp) plt.plot(single_threaded_time[:100]) #plt.yscale('log') colors = [plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,2)] lens = [len(x) for x in result_rows[0]] def unpack_flops(all_flops, map_f=sum): flops = [] for i, s in enumerate(sizes_exp): prev = i end = i+1 prev, end = [sum(lens[:x]) for x in (prev, end)] flops.append(all_flops[prev:end]) sums_flops = [map_f(x) for x in flops] return sums_flops sums_flops = [ unpack_flops(thread_exps[i][-1]['proc_buck time']) for i in range(len(thread_exps)) ] sums_flops = 1e9*np.array(sums_flops) print(sums_flops[0]) print(sums_flops.shape) sums_flops_theory = [sum(x) for x in result_rows[1]] sums_mems_theory = [max(x) for x in result_rows[0]] #for sf in sums_flops: plt.plot(sf) plt.plot(ns, 
sums_flops_theory, '--' , color=colors[0] , label='FLOP theory' ) plt.plot(ns, 16*np.array(sums_mems_theory), '--' , color=colors[1] , label='Memory theory' ) unp_flop = 1e9*np.array(unpack_flops(single_threaded_time, map_f=max)) unp_mem = unpack_flops(single_threaded_mem, map_f=max) ns_exp = ns[:len(unp_mem)] min_shift = lambda x: np.array(x) - .99*min(x) flop_mem_shifted = (min_shift(x) for x in (unp_flop, unp_mem)) plt.plot(ns_exp, next(flop_mem_shifted), '-' , color=colors[0] , label='FLOP experiment' ) plt.plot(ns_exp, next(flop_mem_shifted), '-' , color=colors[1] , label='Memory experiment' ) plt.legend() plt.yscale('log') plt.minorticks_on() plt.ylabel('Cost of contraction') plt.xlabel('Number of qubits') plt.savefig('figures/theory_vs_exp_tasks_') ```
github_jupyter
``` %matplotlib inline ``` # The double pendulum problem This animation illustrates the double pendulum problem. Double pendulum formula translated from the C code at http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c ```
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation

G = 9.8  # acceleration due to gravity, in m/s^2
L1 = 1.0  # length of pendulum 1 in m
L2 = 1.0  # length of pendulum 2 in m
M1 = 1.0  # mass of pendulum 1 in kg
M2 = 1.0  # mass of pendulum 2 in kg


def derivs(state, t):
    """Right-hand side of the double-pendulum ODE for scipy's odeint.

    state is [theta1, omega1, theta2, omega2] (angles in radians,
    angular velocities in rad/s, per the np.radians conversion below);
    returns d(state)/dt as an array of the same shape.
    """
    dydx = np.zeros_like(state)
    # theta1' = omega1
    dydx[0] = state[1]

    delta = state[2] - state[0]
    den1 = (M1+M2) * L1 - M2 * L1 * cos(delta) * cos(delta)
    dydx[1] = ((M2 * L1 * state[1] * state[1] * sin(delta) * cos(delta)
                + M2 * G * sin(state[2]) * cos(delta)
                + M2 * L2 * state[3] * state[3] * sin(delta)
                - (M1+M2) * G * sin(state[0]))
               / den1)

    # theta2' = omega2
    dydx[2] = state[3]

    den2 = (L2/L1) * den1
    dydx[3] = ((- M2 * L2 * state[3] * state[3] * sin(delta) * cos(delta)
                + (M1+M2) * G * sin(state[0]) * cos(delta)
                - (M1+M2) * L1 * state[1] * state[1] * sin(delta)
                - (M1+M2) * G * sin(state[2]))
               / den2)

    return dydx

# create a time array from 0..100 sampled at 0.05 second steps
dt = 0.05
t = np.arange(0, 20, dt)

# th1 and th2 are the initial angles (degrees)
# w10 and w20 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0

# initial state
state = np.radians([th1, w1, th2, w2])

# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)

# convert the angle solutions to Cartesian bob positions
# (pendulum 2 hangs from the tip of pendulum 1)
x1 = L1*sin(y[:, 0])
y1 = -L1*cos(y[:, 0])

x2 = L2*sin(y[:, 2]) + x1
y2 = -L2*cos(y[:, 2]) + y1

fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.set_aspect('equal')
ax.grid()

line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)


def init():
    """Clear the artists; base frame for blitted animation."""
    line.set_data([], [])
    time_text.set_text('')
    return line, time_text


def animate(i):
    """Draw frame i: the pivot plus both bob positions at time i*dt."""
    thisx = [0, x1[i], x2[i]]
    thisy = [0, y1[i], y2[i]]

    line.set_data(thisx, thisy)
    time_text.set_text(time_template % (i*dt))
    return line, time_text


ani = animation.FuncAnimation(fig, animate, range(1, len(y)),
                              interval=dt*1000, blit=True, init_func=init)
plt.show()
```
github_jupyter
# Boosting In this section, we will construct a boosting classifier with the `AdaBoost` algorithm and a boosting regressor with the `AdaBoost.R2` algorithm. These algorithms can use a variety of weak learners but we will use decision tree classifiers and regressors, constructed in {doc}`Chapter 5 </content/c5/concept>`. ``` ## Import decision trees import import_ipynb import classification_tree as ct; ## Import numpy and visualization packages import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn import datasets ``` ## 1. Classification with AdaBoost The following is a construction of the binary AdaBoost classifier introduced in the {doc}`concept section </content/c6/s1/boosting>`. Let's again use the {doc}`penguins </content/appendix/data>` dataset from `seaborn`, but rather than predicting the penguin's species (a multiclass problem), we'll predict whether the species is *Adelie* (a binary problem). The data is loaded and split into train vs. test with the hidden code cell below. ``` ## Load data penguins = sns.load_dataset('penguins') penguins.dropna(inplace = True) X = np.array(penguins.drop(columns = ['species', 'island'])) y = 1*np.array(penguins['species'] == 'Adelie') y[y == 0] = -1 ## Train-test split np.random.seed(123) test_frac = 0.25 test_size = int(len(y)*test_frac) test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False) X_train = np.delete(X, test_idxs, 0) y_train = np.delete(y, test_idxs, 0) X_test = X[test_idxs] y_test = y[test_idxs] ``` Recall that AdaBoost fits *weighted* weak learners. Let's start by defining the weighted loss functions introduced in the {doc}`concept section </content/c6/s1/boosting>`. The helper function `get_weighted_pmk()` calculates $$ \hat{p}_{mk} = \frac{\sumN w_n I(\bx_n \in \mathcal{N}_m)}{\sumN w_n} $$ for each class $k$. The `gini_index()` and `cross_entropy()` functions then call this function and return the appropriate loss. 
``` ## Loss Functions def get_weighted_pmk(y, weights): ks = np.unique(y) weighted_pmk = [sum(weights[y == k]) for k in ks] return(np.array(weighted_pmk)/sum(weights)) def gini_index(y, weights): weighted_pmk = get_weighted_pmk(y, weights) return np.sum( weighted_pmk*(1-weighted_pmk) ) def cross_entropy(y, weights): weighted_pmk = get_weighted_pmk(y, weights) return -np.sum(weighted_pmk*np.log2(weighted_pmk)) def split_loss(child1, child2, weights1, weights2, loss = cross_entropy): return (len(child1)*loss(child1, weights1) + len(child2)*loss(child2, weights2))/(len(child1) + len(child2)) ``` In order to incorporate observation weights, we have to make slight adjustments to the `DecisionTreeClassifier` class. In the class we {doc}`previously constructed </content/c5/s2/classification_tree>`, the data from parent nodes was split and funneled anonymously to one of two child nodes. This alone will not allow us to incorporate weights. Instead, we need to also track the ID of each observation so we can track its weight. This is done with the `DecisionTreeClassifier` class defined in the hidden cell below, which is mostly a reconstruction of the class defined in Chapter 5. ``` ## Helper Classes class Node: def __init__(self, Xsub, ysub, observations, ID, depth = 0, parent_ID = None, leaf = True): self.Xsub = Xsub self.ysub = ysub self.observations = observations self.ID = ID self.size = len(ysub) self.depth = depth self.parent_ID = parent_ID self.leaf = leaf class Splitter: def __init__(self): self.loss = np.inf self.no_split = True def _replace_split(self, loss, d, dtype = 'quant', t = None, L_values = None): self.loss = loss self.d = d self.dtype = dtype self.t = t self.L_values = L_values self.no_split = False ## Main Class class DecisionTreeClassifier: ############################# ######## 1. 
TRAINING ######## ############################# ######### FIT ########## def fit(self, X, y, weights, loss_func = cross_entropy, max_depth = 100, min_size = 2, C = None): ## Add data self.X = X self.y = y self.N, self.D = self.X.shape dtypes = [np.array(list(self.X[:,d])).dtype for d in range(self.D)] self.dtypes = ['quant' if (dtype == float or dtype == int) else 'cat' for dtype in dtypes] self.weights = weights ## Add model parameters self.loss_func = loss_func self.max_depth = max_depth self.min_size = min_size self.C = C ## Initialize nodes self.nodes_dict = {} self.current_ID = 0 initial_node = Node(Xsub = X, ysub = y, observations = np.arange(self.N), ID = self.current_ID, parent_ID = None) self.nodes_dict[self.current_ID] = initial_node self.current_ID += 1 # Build self._build() ###### BUILD TREE ###### def _build(self): eligible_buds = self.nodes_dict for layer in range(self.max_depth): ## Find eligible nodes for layer iteration eligible_buds = {ID:node for (ID, node) in self.nodes_dict.items() if (node.leaf == True) & (node.size >= self.min_size) & (~ct.all_rows_equal(node.Xsub)) & (len(np.unique(node.ysub)) > 1)} if len(eligible_buds) == 0: break ## split each eligible parent for ID, bud in eligible_buds.items(): ## Find split self._find_split(bud) ## Make split if not self.splitter.no_split: self._make_split() ###### FIND SPLIT ###### def _find_split(self, bud): ## Instantiate splitter splitter = Splitter() splitter.bud_ID = bud.ID ## For each (eligible) predictor... if self.C is None: eligible_predictors = np.arange(self.D) else: eligible_predictors = np.random.choice(np.arange(self.D), self.C, replace = False) for d in sorted(eligible_predictors): Xsub_d = bud.Xsub[:,d] dtype = self.dtypes[d] if len(np.unique(Xsub_d)) == 1: continue ## For each value... 
if dtype == 'quant': for t in np.unique(Xsub_d)[:-1]: L_condition = Xsub_d <= t ysub_L = bud.ysub[L_condition] ysub_R = bud.ysub[~L_condition] weights_L = self.weights[bud.observations][L_condition] weights_R = self.weights[bud.observations][~L_condition] loss = split_loss(ysub_L, ysub_R, weights_L, weights_R, loss = self.loss_func) if loss < splitter.loss: splitter._replace_split(loss, d, 'quant', t = t) else: for L_values in ct.possible_splits(np.unique(Xsub_d)): L_condition = np.isin(Xsub_d, L_values) ysub_L = bud.ysub[L_condition] ysub_R = bud.ysub[~L_condition] weights_L = self.weights[bud.observations][L_condition] weights_R = self.weights[bud.observations][~L_condition] loss = split_loss(ysub_L, ysub_R, weights_L, weights_R, loss = self.loss_func) if loss < splitter.loss: splitter._replace_split(loss, d, 'cat', L_values = L_values) ## Save splitter self.splitter = splitter ###### MAKE SPLIT ###### def _make_split(self): ## Update parent node parent_node = self.nodes_dict[self.splitter.bud_ID] parent_node.leaf = False parent_node.child_L = self.current_ID parent_node.child_R = self.current_ID + 1 parent_node.d = self.splitter.d parent_node.dtype = self.splitter.dtype parent_node.t = self.splitter.t parent_node.L_values = self.splitter.L_values ## Get X and y data for children if parent_node.dtype == 'quant': L_condition = parent_node.Xsub[:,parent_node.d] <= parent_node.t else: L_condition = np.isin(parent_node.Xsub[:,parent_node.d], parent_node.L_values) Xchild_L = parent_node.Xsub[L_condition] ychild_L = parent_node.ysub[L_condition] child_observations_L = parent_node.observations[L_condition] Xchild_R = parent_node.Xsub[~L_condition] ychild_R = parent_node.ysub[~L_condition] child_observations_R = parent_node.observations[~L_condition] ## Create child nodes child_node_L = Node(Xchild_L, ychild_L, child_observations_L, ID = self.current_ID, depth = parent_node.depth + 1, parent_ID = parent_node.ID) child_node_R = Node(Xchild_R, ychild_R, 
child_observations_R, ID = self.current_ID + 1, depth = parent_node.depth + 1, parent_ID = parent_node.ID) self.nodes_dict[self.current_ID] = child_node_L self.nodes_dict[self.current_ID + 1] = child_node_R self.current_ID += 2 ############################# ####### 2. PREDICTING ####### ############################# ###### LEAF MODES ###### def _get_leaf_modes(self): self.leaf_modes = {} for node_ID, node in self.nodes_dict.items(): if node.leaf: values, counts = np.unique(node.ysub, return_counts=True) self.leaf_modes[node_ID] = values[np.argmax(counts)] ####### PREDICT ######## def predict(self, X_test): # Calculate leaf modes self._get_leaf_modes() yhat = [] for x in X_test: node = self.nodes_dict[0] while not node.leaf: if node.dtype == 'quant': if x[node.d] <= node.t: node = self.nodes_dict[node.child_L] else: node = self.nodes_dict[node.child_R] else: if x[node.d] in node.L_values: node = self.nodes_dict[node.child_L] else: node = self.nodes_dict[node.child_R] yhat.append(self.leaf_modes[node.ID]) return np.array(yhat) ``` With the weighted decision tree constructed, we are ready to build our `AdaBoost` class. The class closely follows the algorithm introduced in the content section, which is copied below for convenience. _____ **Discrete AdaBoost Algorithm** Define the target variable to be $y_n \in \{-1, +1 \}$. 1. Initialize the weights with $w^1_n = \frac{1}{N}$ for $n = 1, 2, \dots, N$. 2. For $t = 1, \dots, T$, - Build weak learner $t$ using weights $\mathbf{w}^t$. - Calculate fitted values $f^t(\bx_n) \in \{-1, +1\}$ for $n = 1, 2, \dots, N$. Let $I^t_n$ equal 1 If $f^t(\bx_n) \neq y_n$ and 0 otherwise. That is, $I^t_n$ indicates whether learner $t$ misclassifies observation $n$. - Calculate the weighted error for learner $t$: $$ \epsilon^t = \frac{\sumN w^t_n I^t_n}{\sumN w^t_n}. $$ - Calculate the accuracy measure for learner $t$: $$ \alpha^t = \log\left(\frac{1-\epsilon^t}{\epsilon^t}\right). 
$$

- Update the weighting with

$$
w^{t + 1}_n = w^t_n\exp(\alpha^tI^t_n),
$$

for $n = 1, 2, \dots, N$.

3. Calculate the overall fitted values with $\hat{y}_n = \text{sign} \left( \sum_{t = 1}^T \alpha^t f^t(\bx_n)\right)$.

_____

```
class AdaBoost:
    """Discrete AdaBoost classifier built on the weighted decision tree above.

    Assumes the targets are coded in {-1, +1} (required for the sign-vote
    in `predict`).
    """

    def fit(self, X_train, y_train, T, stub_depth = 1):
        """Fit `T` weighted trees of depth `stub_depth` with AdaBoost reweighting."""
        self.y_train = y_train
        self.X_train = X_train
        self.N, self.D = X_train.shape
        self.T = T
        self.stub_depth = stub_depth

        ## Instantiate stuff
        self.weights = np.repeat(1/self.N, self.N)
        self.trees = []
        self.alphas = []
        self.yhats = np.empty((self.N, self.T))

        for t in range(self.T):
            ## Fit weak learner t using the current observation weights
            self.T_t = DecisionTreeClassifier()
            self.T_t.fit(self.X_train, self.y_train, self.weights, max_depth = self.stub_depth)
            self.yhat_t = self.T_t.predict(self.X_train)
            missed = self.yhat_t != self.y_train

            ## Weighted error for learner t; clip away from {0, 1} so that
            ## alpha_t and the reweighting factor stay finite when a stub
            ## classifies every (or no) observation correctly.
            self.epsilon_t = np.sum(self.weights*missed)/np.sum(self.weights)
            self.epsilon_t = np.clip(self.epsilon_t, 1e-10, 1 - 1e-10)
            self.alpha_t = np.log( (1-self.epsilon_t)/self.epsilon_t )

            ## Multiply the weight of every misclassified observation by
            ## exp(alpha_t) = (1 - eps_t)/eps_t; others are unchanged.
            ## (Vectorized form of the per-observation update in the algorithm.)
            self.weights = self.weights * np.exp(self.alpha_t * missed)

            ## Append stuff
            self.trees.append(self.T_t)
            self.alphas.append(self.alpha_t)
            self.yhats[:,t] = self.yhat_t

        ## Overall fitted values on the training set
        self.yhat = np.sign(np.dot(self.yhats, self.alphas))

    def predict(self, X_test):
        """Return the sign of the alpha-weighted vote of the weak learners."""
        yhats = np.zeros(len(X_test))
        for t, tree in enumerate(self.trees):
            yhats_tree = tree.predict(X_test)
            yhats += yhats_tree*self.alphas[t]
        return np.sign(yhats)
```

The `AdaBoost` model is finally fit below. To train the model, we supply the training data as well as `T`—the number of weak learners—and `stub_depth`—the depth for each tree (our weak learner).

```
booster = AdaBoost()
booster.fit(X_train, y_train, T = 30, stub_depth = 3)
yhat = booster.predict(X_test)
np.mean(yhat == y_test)
```

## 2. Regression with AdaBoost.R2

Next, let's implement *AdaBoost.R2*, a common boosting algorithm for regression tasks. We'll again use the {doc}`tips </content/appendix/data>` dataset from `seaborn`, loaded in the hidden code cell below.
``` ## Import packages import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn import datasets ## Load data tips = sns.load_dataset('tips') X = np.array(tips.drop(columns = 'tip')) y = np.array(tips['tip']) ## Train-test split np.random.seed(1) test_frac = 0.25 test_size = int(len(y)*test_frac) test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False) X_train = np.delete(X, test_idxs, 0) y_train = np.delete(y, test_idxs, 0) X_test = X[test_idxs] y_test = y[test_idxs] ``` Since our boosting class will use regression trees for its weak learners, let's also import the regression tree we constructed in {doc}`Chapter 5 </content/c5/s2/regression_tree>`. ``` ## Import decision trees import import_ipynb import regression_tree as rt; ``` Recall that the final fitted values in *AdaBoost.R2* are based on a weighted median. Let's first make a helper function to return the weighted median. ``` def weighted_median(values, weights): sorted_indices = values.argsort() values = values[sorted_indices] weights = weights[sorted_indices] weights_cumulative_sum = weights.cumsum() median_weight = np.argmax(weights_cumulative_sum >= sum(weights)/2) return values[median_weight] ``` We can then fit the `AdaBoostR2` class. This again follows the algorithm closely, which is again copied below for convenience. ____ **AdaBoost.R2 Algorithm** 1. Initialize the weights with $w^1_n = \frac{1}{N}$ for $n = 1, 2, \dots, N$. 2. For $t = 1, 2, \dots, T$ or while $\bar{L}^t$, as defined below, is less than or equal to 0.5, - Draw a sample of size $N$ from the training data with replacement and with probability $w^t_n$ for $n = 1, 2, \dots, N$. - Fit weak learner $t$ to the resampled data and calculate the fitted values on the original dataset. Denote these fitted values with $f^t(\bx_{n})$ for $n = 1, 2, \dots, N$. 
- Calculate the observation error $L^t_{n}$ for $n = 1, 2, \dots, N$:

$$
\begin{aligned}
D^t &= \underset{n}{\text{max}} \{ |y_{n} - f^t(\bx_{n})| \} \\
L^t_{n} &= \frac{|y_{n} - f^t(\bx_{n})|}{D^t}
\end{aligned}
$$

- Calculate the model error $\bar{L}^t$:

$$
\bar{L}^t = \sum_{n = 1}^N L^t_n w^t_n
$$

If $\bar{L}^t \geq 0.5$, end iteration and set $T$ equal to $t - 1$.

- Let $\beta^t = \frac{\bar{L}^t}{1- \bar{L}^t}$. The lower $\beta^t$, the greater our confidence in the model.

- Let $Z^t = \sum_{n = 1}^N w^t_n (\beta^t)^{1 - L_n}$ and update the model weights with

$$
w^{t + 1}_n = \frac{w^t_n (\beta^t)^{1 - L_n}}{Z^t},
$$

which increases the weight for observations with a greater error $L^t_n$.

3. Set the overall fitted value for observation $n$ equal to the weighted median of $f^t(\bx_n)$ for $t = 1, 2, \dots, T$ using weights $\log(1/\beta^t)$ for model $t$.

______

```
class AdaBoostR2:
    """AdaBoost.R2 regressor built on the regression tree from Chapter 5."""

    def fit(self, X_train, y_train, T = 100, stub_depth = 1, random_state = None):
        """Fit up to `T` regression-tree weak learners of depth `stub_depth`."""
        self.y_train = y_train
        self.X_train = X_train
        self.T = T
        self.stub_depth = stub_depth
        self.N, self.D = X_train.shape
        self.weights = np.repeat(1/self.N, self.N)
        np.random.seed(random_state)

        self.trees = []
        self.fitted_values = np.empty((self.N, self.T))
        self.betas = []
        for t in range(self.T):

            ## Draw sample, fit tree, get predictions
            bootstrap_indices = np.random.choice(np.arange(self.N), size = self.N, replace = True, p = self.weights)
            bootstrap_X = self.X_train[bootstrap_indices]
            bootstrap_y = self.y_train[bootstrap_indices]
            tree = rt.DecisionTreeRegressor()
            tree.fit(bootstrap_X, bootstrap_y, max_depth = stub_depth)
            self.trees.append(tree)
            yhat = tree.predict(X_train)
            self.fitted_values[:,t] = yhat

            ## Calculate observation errors
            abs_errors_t = np.abs(self.y_train - yhat)
            D_t = np.max(abs_errors_t)
            L_ts = abs_errors_t/D_t

            ## Calculate model error (and possibly break).
            ## Learner t is too weak, so discard it and keep only the t
            ## learners already accepted. Since Python's t is 0-indexed,
            ## the algorithm's "set T equal to t - 1" translates to keeping
            ## t models here; slicing with t-1 would wrongly drop one valid
            ## model. (Note tree t was already appended above, so trees[:t]
            ## removes exactly the failing learner.)
            Lbar_t = np.sum(self.weights*L_ts)
            if Lbar_t >= 0.5:
                self.T = t
                self.fitted_values = self.fitted_values[:,:t]
                self.trees = self.trees[:t]
                break

            ## Calculate and record beta
            beta_t = Lbar_t/(1 - Lbar_t)
            self.betas.append(beta_t)

            ## Reweight
            Z_t = np.sum(self.weights*beta_t**(1-L_ts))
            self.weights *= beta_t**(1-L_ts)/Z_t

        ## Get median (assumes at least one learner was accepted,
        ## i.e. the very first learner had Lbar < 0.5)
        self.model_weights = np.log(1/np.array(self.betas))
        self.y_train_hat = np.array([weighted_median(self.fitted_values[n], self.model_weights)
                                     for n in range(self.N)])

    def predict(self, X_test):
        """Weighted-median prediction across the retained weak learners."""
        N_test = len(X_test)
        fitted_values = np.empty((N_test, self.T))
        for t, tree in enumerate(self.trees):
            fitted_values[:,t] = tree.predict(X_test)
        return np.array([weighted_median(fitted_values[n], self.model_weights) for n in range(N_test)])
```

Again, we fit our booster by providing training data in addition to `T`—the number of weak learners—and `stub_depth`—the depth for our regression tree weak learners.

```
booster = AdaBoostR2()
booster.fit(X_train, y_train, T = 50, stub_depth = 4, random_state = 123)

fig, ax = plt.subplots(figsize = (7,5))
## seaborn >= 0.12 requires x and y to be passed by keyword
sns.scatterplot(x = y_test, y = booster.predict(X_test));
ax.set(xlabel = r'$y$', ylabel = r'$\hat{y}$', title = 'Fitted vs. Observed Values for AdaBoostR2')
sns.despine()
```
github_jupyter
## import modules ``` import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader from torchvision import datasets, transforms import matplotlib.pyplot as plt ``` ## define model architecture ``` class ConvNet(nn.Module): def __init__(self): super(ConvNet, self).__init__() self.cn1 = nn.Conv2d(1, 16, 3, 1) self.cn2 = nn.Conv2d(16, 32, 3, 1) self.dp1 = nn.Dropout2d(0.10) self.dp2 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(4608, 64) # 4608 is basically 12 X 12 X 32 self.fc2 = nn.Linear(64, 10) def forward(self, x): x = self.cn1(x) x = F.relu(x) x = self.cn2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dp1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dp2(x) x = self.fc2(x) op = F.log_softmax(x, dim=1) return op ``` ## define training and inference routines ``` def train(model, device, train_dataloader, optim, epoch): model.train() for b_i, (X, y) in enumerate(train_dataloader): X, y = X.to(device), y.to(device) optim.zero_grad() pred_prob = model(X) loss = F.nll_loss(pred_prob, y) # nll is the negative likelihood loss loss.backward() optim.step() if b_i % 10 == 0: print('epoch: {} [{}/{} ({:.0f}%)]\t training loss: {:.6f}'.format( epoch, b_i * len(X), len(train_dataloader.dataset), 100. * b_i / len(train_dataloader), loss.item())) def test(model, device, test_dataloader): model.eval() loss = 0 success = 0 with torch.no_grad(): for X, y in test_dataloader: X, y = X.to(device), y.to(device) pred_prob = model(X) loss += F.nll_loss(pred_prob, y, reduction='sum').item() # loss summed across the batch pred = pred_prob.argmax(dim=1, keepdim=True) # us argmax to get the most likely prediction success += pred.eq(y.view_as(pred)).sum().item() loss /= len(test_dataloader.dataset) print('\nTest dataset: Overall Loss: {:.4f}, Overall Accuracy: {}/{} ({:.0f}%)\n'.format( loss, success, len(test_dataloader.dataset), 100. 
* success / len(test_dataloader.dataset))) ``` ## create data loaders ``` # The mean and standard deviation values are calculated as the mean of all pixel values of all images in the training dataset train_dataloader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1302,), (0.3069,))])), # train_X.mean()/256. and train_X.std()/256. batch_size=32, shuffle=True) test_dataloader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1302,), (0.3069,)) ])), batch_size=500, shuffle=False) ``` ## define optimizer and run training epochs ``` torch.manual_seed(0) device = torch.device("cpu") model = ConvNet() optimizer = optim.Adadelta(model.parameters(), lr=0.5) ``` ## model training ``` for epoch in range(1, 3): train(model, device, train_dataloader, optimizer, epoch) test(model, device, test_dataloader) ``` ## run inference on trained model ``` test_samples = enumerate(test_dataloader) b_i, (sample_data, sample_targets) = next(test_samples) plt.imshow(sample_data[0][0], cmap='gray', interpolation='none') plt.show() print(f"Model prediction is : {model(sample_data).data.max(1)[1][0]}") print(f"Ground truth is : {sample_targets[0]}") ```
github_jupyter
# Homework 5 - Liberatori Benedetta

This exercise contains a modified version of the **magnitude_pruning** function from notebook 05-pruning.ipynb, to allow iterative pruning.

The implementation of magnitude pruning is mask-based and the function takes as input a previously computed mask (default is None for the one-shot case or the first step of the iteration). Then, the only modified step is the vectorization of the distribution of the parameters in absolute value. Here only those with a corresponding value of 1 in the mask are taken into account.

N.b. this could have been implemented with just an additional line of code w.r.t. the previous **magnitude_pruning** function: discarding all the 0s from *flat* before computing the threshold. This version does not make use of the mask and is clearly easier to write, yet it relies on the assumption that no parameter attains the value zero during training. For this reason the presented version has been preferred.

```
import torch
from scripts import mnist, train_utils, architectures, train
from scripts.train_utils import AverageMeter, accuracy

def magnitude_pruning(model, pruning_rate, layers_to_prune=["1", "4", "7", "10"], prev_mask=None):
    # 1. vectorize distribution of abs(parameter)
    # including only the parameters in the correct layers
    # If prev_mask from a previous step in the pruning is not None
    # then consider only those parameters for which the
    # corresponding element in the mask is not 0,
    # i.e. the parameters that have not been pruned yet.
auxmask = [] params_to_prune = [] i = 0 if prev_mask is not None: for pars in model.named_parameters(): if any([l in pars[0] for l in layers_to_prune]) : params_to_prune.append(pars[1]) auxmask.append(prev_mask[i]) i += 1 flat = torch.cat([pars.abs().flatten() for pars in params_to_prune], dim=0) flatmask = torch.cat([m.flatten() for m in auxmask], dim =0) flat = flat[flatmask != 0] else: params_to_prune = [pars[1] for pars in model.named_parameters() if any([l in pars[0] for l in layers_to_prune]) ] flat = torch.cat([pars.abs().flatten() for pars in params_to_prune], dim=0) # 2. sort this distribution flat = flat.sort()[0] # 3. obtain the threshold position = int(pruning_rate * flat.shape[0]) thresh = flat[position] # 4. binarize the parameters & 5. compose these booleans into the mask # 6. obtain the new structure of parameters mask = [] for pars in model.named_parameters(): if any([l in pars[0] for l in layers_to_prune]) : m = torch.where(pars[1].abs() >= thresh, 1, 0) mask.append(m) pars[1].data *= m else: mask.append(torch.ones_like(pars[1])) # 7. final step return mask def number_of_ones_in_mask(mask): return sum([m.sum().item() for m in mask]) / sum([m.numel() for m in mask]) ``` Let us test it on a pretrained model. ``` trainloader, testloader, _, _ = mnist.get_data() loss_fn = torch.nn.CrossEntropyLoss() layers = [ {"n_in": 784, "n_out": 16, "batchnorm": False}, {"n_out": 32, "batchnorm": True}, {"n_out": 64, "batchnorm": True}, {"n_out": 10, "batchnorm": True} ] net = architectures.MLPCustom(layers) state_dict = torch.load("models_push/mlp_custom_mnist/mlp_custom_mnist.pt") net.load_state_dict(state_dict) train.test_model(net, testloader, loss_fn=loss_fn) # After one step pruning mask = magnitude_pruning(net, pruning_rate=0.25) number_of_ones_in_mask(mask) # After two steps mask = magnitude_pruning(net, pruning_rate=0.25, prev_mask=mask) number_of_ones_in_mask(mask) ``` So the number of ones dicreases and new parameters are pruned. 
Now this can be embedded into a function for **Iterative Magnitude Pruning**, in order to stratify pruning and training. For this purpose **Fine-Tune** have been used. Thus the additional inputs required are the number of pruning steps **niter** and the number of training epochs between pruning steps **num_epochs_per_iter**. ``` def train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance, device, mask, layers_to_prune): for X, y in dataloader: X = X.to(device) y = y.to(device) optimizer.zero_grad() y_hat = model(X) loss = loss_fn(y_hat, y) loss.backward() ##### we must neutralize the gradient on the pruned params before the optimizer takes a step #### if mask is not None: for (name, param), m in zip(model.named_parameters(), mask): if any([l in name for l in layers_to_prune]): param.grad *= m ###### optimizer.step() acc = performance(y_hat, y) loss_meter.update(val=loss.item(), n=X.shape[0]) performance_meter.update(val=acc, n=X.shape[0]) def train_model(model, dataloader, loss_fn, optimizer, num_epochs, performance=accuracy, lr_scheduler=None, device=None, mask=None, layers_to_prune=["1", "4", "7", "10"]): if device is None: device = "cuda:0" if torch.cuda.is_available() else "cpu" print(f"Training on {device}") model = model.to(device) model.train() for epoch in range(num_epochs): loss_meter = AverageMeter() performance_meter = AverageMeter() print(f"Epoch {epoch+1} --- learning rate {optimizer.param_groups[0]['lr']:.5f}") train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance, device, mask, layers_to_prune) print(f"Epoch {epoch+1} completed. 
Loss - total: {loss_meter.sum} - average: {loss_meter.avg}; Performance: {performance_meter.avg}") if lr_scheduler is not None: lr_scheduler.step() return loss_meter.sum, performance_meter.avg def IterativeMagnitudePruning(model, dataloader, loss_fn, optimizer, niter, num_epochs_per_iter, performance=accuracy, lr_scheduler=None, device=None, mask=None, layers_to_prune=["1", "4", "7", "10"] , pruning_rate=0.25): for i in range(niter): mask=magnitude_pruning(model, pruning_rate, layers_to_prune, prev_mask=mask) print(f"number of ones in mask: {number_of_ones_in_mask(mask)}") train_model(model, dataloader, loss_fn, optimizer, num_epochs_per_iter, performance=accuracy, lr_scheduler=None, device=None, mask=mask, layers_to_prune=layers_to_prune) ```
github_jupyter
# DataFrames Pt. 1 > DataFrames are the workhorse of pandas and are directly inspired by the R programming language. We can think of a DataFrame as a bunch of Series objects put together to share the same index. Let's use pandas to explore this topic! # In Pt. 1 we cover the following : * Create a basic DataFrame * Indexing * Selection * Dropping rows and cols and importance of inplace parameter. * Reasoning behind axis = 0 for rows and axis = 1 for columns. * Accessing rows and cols in DataFrame ``` import numpy as np import pandas as pd from numpy.random import randn # For having gridlines %%HTML <style type="text/css"> table.dataframe td, table.dataframe th { border: 1px black solid !important; color: black !important; } # Setting a seed -> Seed makes sure that we get the same random numbers. np.random.seed(101) df = pd.DataFrame(randn(5,4),['A','B','C','D','E'],['W','X','Y','Z']) df # Gives us a list of columns W X Y Z, and rows A B C D E. # Each of the columns is a pandas sereis, W X Y and Z are series sharing a common index. # That's what dataframe is, a bunch of series that shares an index. ``` # Indexing and Selection ``` df['W']# Grabs W column which looks like a series. Always use this way to grab a column. type(df['W']) # Shows that it is a series type(df) df.W # If familiar with SQL a lot of times while selecting a column you pass in the table.col_name and this works too! # Not recommended! # You can also pass in a list of columns df[['W','Z']] # Asking for multiple columns you get back a DataFrame while a single column is just a series. df['new'] = df['W']+df['Y'] # When creating a new columnm, we can define it as if it already exists and on the RHS of = sign use other cols with # arithmetic to make a new column. df ``` **To remove a column we use df.drop() and pass in the column name. 
Also we need to refer to axis = 1, by default axis is set to 0.** ``` df.drop('new',axis=1) # df.drop() usage doesn't actually affects the DataFrame as we can see below on calling DataFrame df. df # To actually remove column new we have to enter the parameter inplace = True # Pandas does this so that we do not lose valuable information while dropping. df.drop('new',axis=1,inplace=True) df#New column permanently removed. # df.drop() also used to drop rows. df.drop('E',axis=0) ``` **Another point of confusion is why are rows have axis = 0 and columns have axis = 1. Its reference comes back to NumPy. Since DataFrames are just fancy index markers on top of a NumPy Array.** ``` # As a proof of logic we can do the following df.shape ``` **Notice that df.shape is a tuple for a 2-D matrix, at 0 index is the number of rows and columns at index1.** **Therefore rows as axis = 0 and columns as axis = 1 .** # Rows * 1st method to grab a row : Based on the label of index * loc[] ``` df # Multiple ways to select rows by making use of method(). # 1. loc (location) -> Takes label as input df.loc['C'] # Though it is a method still we use square brackets here, that's how it works with Pandas. ``` **df.loc['row_you_want']** * returns a series. * Therefore, not only all columns are series but rows are as well and are returned as series when requested. > 2nd method to grab a row : Based on the index position instead of label # iloc : > index based location Used to pass in a numerical index position, even if axes are labelled by strings. ``` df.iloc[2] # Numerical based index. # To select subsets of rows and columns. Similar to numpy df.loc['B','Y']# df.loc['row_we_want','column_we_want'] # A to E rows with W & Y columns df.loc[['A','B','C','D','E'],['W','Y']] ``` # DataFrames Pt. 2 # In Pt. 
2 we cover the following : * Conditional Selection * Single line v/s Multiple line abstraction * Using multiple conditions * Reason and fix for ambiguous series error * Modifying the index (Set Index v/s Reset Index) ``` # We can perform conditional selection in Pandas using bracket notation. df booldf = df > 0 #Using a comparison operator against the DataFrame gives a DataFrame of boolean values. # Similar to what happens to a numpy array when you just apply a conditional selection. booldf df[booldf] #We will get values where the condition was True and NaN (Not a Number) for all false locations. # Ideal way to do contional selection is what's defined below. What we did above was just for ease of understanding. df[df>0] ``` > **But the method above of conditional selection is also uncommon and most likely instead of just passing the entire data frame, we would pass a row or a column value and instead of returning NaN it will return only the rows or columns of the subsets of the dataframe where conditions are true.** ``` df df['W']>0 df['W'] # Value at index C is less than 0, so returns false for being greater than zero. # Now we can use the series of boolean values corresponding to rows which is shown above to filter out rows # based on a column's value. df[df['W']>0] # Returns only the rows where condition is true. We use this type of selection a lot! # As we are passing the series we do not get null values anymore. # Null values only occur when you perform a conditional statement on the entire DataFrame. # To grab all the rows in the dataframe where Z < 0 df[df['Z']<0] resultdf = df[df['W']>0] # Note that we get a DF in response. And this means we can call commands on this DF. # We can do so in 1 or 2 steps. resultdf resultdf['X'] # Grabbing the X column from the resultdf DF where C is not present. We do this here in 2 steps. # Doing it in 1 step will look like what's described below : df[df['W']>0]['X'] # Return the dataframe where column value of W > 0 i.e. 
All Rows - C. Then stacking [] bracket # notation on top of that. df[df['W']>0][['X','Y','Z']] #Since this is a dataframe we can bracket for multiple columns by passing in a list. # Line by line version for undestanding of the command above is boolser = df['W']>0 boolser result = df[boolser] result # Entire DataFrame without row C since it was False. mycols = ['X','Y','Z'] result[mycols] #Only print the mycols columns from result DataFrame. ``` # Using multiple conditions ``` df[(df['W']>0) and (df['Y']>1)]# W > 0 and Y>1 # Gives us an error saying "truth value of a Series is ambiguous." # It basically means python's and operator can't take into account one series of boolean value with respect to other. # and operator can take into account only boolean values. For instance print(True and True) print(False and True) # On passing the entire series of boolean values such as df['W']>0 # the and operator begins to get confused as it deals with single instances of boolean values only. # Workaround is to use an & while working with pandas. df[(df['W']>0) & (df['Y']>1)]# W > 0 and Y>1 ``` ### To avoid ambiguous series error : > * **Use & instead of and** > * **Use | instead of or** # Modifying the Index ``` df #Original DF # To reset the index in range 0....n-1 df.reset_index() # index gets reset to a column and actual index becomes numerical, all of this to prevent loss of #data. Again keep in mind it doesn't occurs inplace and calling back the original df will show us what is above. df # To make the change permanent use inplace in the following manner : # df.reset_index(inplace=True) df = pd.DataFrame(randn(5,4),['A','B','C','D','E'],['W','X','Y','Z']) # To get the original index with string index. df df.reset_index() # Old Index becomes a column of the dataframe. #Setting the index newind = 'CG MP UP TN OR'.split() # Creating a new index # .split() of a string is a common method for splitting off of all the blank space a quick way to create a list. 
newind # Putting the list above as column in the DataFrame df df['States'] = newind # Since the dimensions match. So it will add list as a column. df # We can see that a new column is added at the end of the df. # To make the states column as index make use of method set_index df.set_index('States') # States column becomes the index. # Note : Unless you retain the information of your old index, set_index will overwrite the old index and you # won't be able to retain the old index information as a new column. # df remains similar to originally defined as inplace = False which is by default. df ``` # DataFrames Pt. 3 ### In Pt. 3 we cover the following : * Multi-Index and Index Hierarchy * Calling Data from Multi Level Index * Cross-section xs * Aedvance review of multi-index topics and index hierachy > Let us go over how to work with Multi-Index, first we'll create a quick example of what a Multi-Indexed DataFrame would look like: ``` # Index Levels outside = ['G1','G1','G1','G2','G2','G2'] inside = [1,2,3,1,2,3] hier_index = list(zip(outside,inside)) # Using zip function along with list function to make it a list of tuple pairs hier_index = pd.MultiIndex.from_tuples(hier_index) # Takes in a list which looks like the one below and create a # multi_index from it. Upon execution this gives multiindex with several levels. list(zip(outside,inside)) df = pd.DataFrame(randn(6,2),hier_index,['A','B']) # Makes a DF of 6 rows and 2 columns, #index equal to hier index and cols A,B df # Gives a dataframe of 2 levels of index. Index1- G1,G2 and Index2 - 1,2,3 and cols A and B df.loc['G1'] # Returns sub dataframe of everything inside G1 df.loc['G1'].loc[1] # Gives everything inside G1's 1st row. #Basic idea is to call from outside index and continue calling in deeper one level. 
# To name the indexes of G1 and 1 2 3 we can do following : df.index.names # Gives output showing indices do not have a name shown by None df.index.names = ['Groups','Num'] df df.loc['G2'].loc[2]['B'] # To index G1 3 [A] -0.925874 df.loc['G1'].loc[3]['A'] df.xs('G1') # Returns a cross-section of rows and columns from a series of DF. Used with multi-level indexes. df.loc['G1'] # What's nice about cross-section xs is that it has ability to skip or go inside a multilevel index.\ # Say we have dataframe df df ``` * ***Aim : To grab all the values of number equal to 1 of G1 and G2 num 1 as well i.e. all values whose num = 1.*** * ***This is hard to achieve in .loc method. But it is easy to do using xs method.*** * ***Specify what you want as far as num = 1, and indicate the second argument level and name of index.*** ``` df.xs(1,level='Num') # We are able to grab a xs (cross-section) where level = 'Num' and is 1.b ``` # Great Work!
github_jupyter
<table> <tr> <td width=15%><img src="./img/UGA.png"></img></td> <td><center><h1>Introduction to Python for Data Sciences</h1></center></td> <td width=15%><a href="http://www.iutzeler.org" style="font-size: 16px; font-weight: bold">Franck Iutzeler</a> </td> </tr> </table> <br/><br/> <center><a style="font-size: 40pt; font-weight: bold">Chap. 4 - Scikit Learn </a></center> <br/><br/> # 2- Supervised Learning In the session, we will investigate some *examples* on how to deal with popular learning problems using standard algorithms. Many other problems and algorithms exist so this course is not at all exhaustive. ## Classification ``` import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_blobs %matplotlib inline # we create 40 separable points in R^2 around 2 centers (random_state=6 is a seed so that the set is separable) X, y = make_blobs(n_samples=40, n_features=2, centers=2 , random_state=6) print(X[:5,:],y[:5]) # print the first 5 points and labels plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) ``` Support Vector Machines (SVM) are based on learning a vector $w$ and an intercept $b$ such that the hyperplane $w^T x - b = 0$ separates the data i.e. $a$ belongs to one class if $w^T a - b > 0$ and the other elsewhere. They were later extended to *Kernel methods* that is $\kappa(w, a) - b = 0$ is now the separating *curve* where $\kappa$ is the *kernel*, typically: * linear: $\kappa(x,y)= x^T y$ (original SVM) * polynomial: $\kappa(x,y)= (x^T y)^d$ * Gaussian radial basis function (rfb): $\kappa(x,y)= \exp( - \gamma \| x - y \|^2 )$ ``` from sklearn.svm import SVC # Support vector classifier i.e. Classifier by SVM modelSVMLinear = SVC(kernel="linear") modelSVMLinear.fit(X,y) ``` The following illustration can be found in the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas. 
``` def plot_svc_decision_function(model, ax=None, plot_support=True): """Plot the decision function for a 2D SVC""" if ax is None: ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # create grid to evaluate model x = np.linspace(xlim[0], xlim[1], 30) y = np.linspace(ylim[0], ylim[1], 30) Y, X = np.meshgrid(y, x) xy = np.vstack([X.ravel(), Y.ravel()]).T P = model.decision_function(xy).reshape(X.shape) # plot decision boundary and margins ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) # plot support vectors if plot_support: ax.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=300, linewidth=1, facecolors='none'); ax.set_xlim(xlim) ax.set_ylim(ylim) plt.scatter(X[:, 0], X[:, 1], c=y , cmap=plt.cm.Paired) plot_svc_decision_function(modelSVMLinear) ``` We see clearly that the linear SVM seeks at maximizing the *margin* between the hyperplane and the two well defined classes from the data. ### Non-separable data In real cases, the data is usually not linearly separable as before. ``` # we create points in R^2 around 2 centers (random_state=48443 is a seed so that the set is *not* separable) X, y = make_blobs(n_samples=100, n_features=2, centers=2 , random_state=48443) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) ``` Let us use the *same* linear SVM classifier. Obviously, there are *misclassified points*, the model is thus learnt not by maximizing the margin (which does not exist anymore) but by minimizing a penalty over misclassified data. This penalty takes the form of an allowance margin controlled by a parameter $C$. The smaller $C$ the more inclusive the margin. Finding a good value for $C$ is up to the data scientist. ``` from sklearn.model_selection import train_test_split # sklearn > ... 
XTrain, XTest, yTrain, yTest = train_test_split(X,y,test_size = 0.5) # split data in two model1 = SVC(kernel="linear",C=0.01) model1.fit(XTrain,yTrain) model2 = SVC(kernel="linear",C=100) model2.fit(XTrain,yTrain) plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired) plot_svc_decision_function(model1) plt.title("C = 0.01") plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired) plot_svc_decision_function(model2) plt.title("C = 100") ``` To find out which value of $C$ to use or globally the performance of the classifier, one can use Scikit Learn's [classification metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics), for instance the confusion matrix. ``` from sklearn.metrics import confusion_matrix yFit1 = model1.predict(XTest) yFit2 = model2.predict(XTest) mat1 = confusion_matrix(yTest, yFit1) mat2 = confusion_matrix(yTest, yFit2) print('Model with C = 0.01') print(mat1) print("Model with C = 100") print(mat2) ``` It can also be plotted in a fancier way with seaborn. ``` import seaborn as sns sns.heatmap(mat1, square=True, annot=True ,cbar=False) plt.ylabel('true label') plt.xlabel('predicted label') ``` ### Kernels When the separation between classes is not *linear*, kernels may be used to draw separating curves instead of lines. The most popular is the Gaussian rbf. ``` from sklearn.datasets import make_moons X,y = make_moons(noise=0.1) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) modelLinear = SVC(kernel="linear") modelLinear.fit(X,y) modelRbf = SVC(kernel="rbf") modelRbf.fit(X,y) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plot_svc_decision_function(modelLinear) plot_svc_decision_function(modelRbf) plt.title("The two models superposed") ``` Let us compare the linear and rbf training error using the zero one loss (the proportion of misclassified examples). 
``` from sklearn.metrics import zero_one_loss yFitLinear = modelLinear.predict(X) yFitRbf = modelRbf.predict(X) print("0/1 loss -- Linear: {:.3f} Rbf: {:.3f}".format(zero_one_loss(y, yFitLinear),zero_one_loss(y, yFitRbf))) ``` ### Multiple classes Where there are multiples classes (as in the *iris* dataset of the Pandas notebook), different strategies can be adopted: * Transforming the multiclass problem into a binary one by looking at the *one-vs-rest* problem (for each class construct a binary classifier between it and the rest) or the *one-vs-one* one (where each couple of classes is considered separately). After this transformation, standard binary classifiers can be used. * Using dedicated algorithms such as *decision trees* The corresponding algorithms can be found in the [multiclass module documentation](http://scikit-learn.org/stable/modules/multiclass.html). We are going to illustrate this by the iris 3-class classification problem using only the 2 petal features (width and length, this is only so that the feature vector is 2D and easy to visualize). 
``` import pandas as pd import numpy as np iris = pd.read_csv('data/iris.csv') classes = pd.DataFrame(iris["species"]) features = iris.drop(["species","sepal_length","sepal_width"],axis=1) classes.sample(6) features.sample(6) XTrain, XTest, yTrain, yTest = train_test_split(features,classes,test_size = 0.5) from sklearn.multiclass import OneVsRestClassifier yPred = OneVsRestClassifier(SVC()).fit(XTrain, yTrain).predict(XTest) print(yPred) # Note the classes are not number but everything went as expected class_labels= ['virginica' , 'setosa' , 'versicolor'] sns.heatmap(confusion_matrix(yTest, yPred), square=True, annot=True ,cbar=False, xticklabels= class_labels, yticklabels=class_labels) plt.ylabel('true label') plt.xlabel('predicted label') ``` ### Other classifiers The main classifiers from Scikit learn are: *Linear SVM, RBF SVM (as already seen), Nearest Neighbors, Gaussian Process, Decision Tree, Random Forest, Neural Net, AdaBoost, Naive Bayes, QDA*. Use is: from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025), SVC(gamma=2, C=1), GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1), AdaBoostClassifier(), GaussianNB(), QuadraticDiscriminantAnalysis()] ## Regression Let consider the problem of predicting real values from a set of features. 
We will consider the <a href="http://archive.ics.uci.edu/ml/datasets/Student+Performance">student performance</a> dataset. The goal is to predict the final grade from the other information, we get from the documentation: ``` import pandas as pd import numpy as np student = pd.read_csv('data/student-mat.csv') student.head() target = pd.DataFrame(student["G3"]) features = student.drop(["G3"],axis=1) ``` One immediate problem here is that the features are not *numeric* (not floats). Thankfully, Scikit Learn provides [encoders](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html#sklearn.preprocessing.LabelEncoder) to convert categorical (aka nominal, discrete) features to numerical ones. ``` from sklearn.preprocessing import LabelEncoder lenc = LabelEncoder() num_features = features.apply(lenc.fit_transform) num_features.head() ``` Even numerical values were encoded, as we are going to normalize, it is not really important. The normalization is done by removing the mean and equalizing the variance per feature, in addition, we are going to add an intercept. ``` from sklearn.preprocessing import StandardScaler, add_dummy_feature scaler = StandardScaler() normFeatures = add_dummy_feature(scaler.fit_transform(num_features)) preproData = pd.DataFrame(normFeatures , columns=[ "intercept" ] + list(num_features.columns) ) preproData.describe().T ``` ### Regression and Feature selection with the Lasso The lasso problem is finding a regressor $w$ such that minimizes $$ \frac{1}{2 n_{samples}} \|X w - y ||^2_2 + \alpha \|w\|_1 $$ and is popular for prediction as it simultaneously *selects features* thanks to the $\ell_1$-term. The greater $\alpha$ the fewer features. ``` from sklearn.model_selection import train_test_split # sklearn > ... 
from sklearn.linear_model import Lasso XTrain, XTest, yTrain, yTest = train_test_split(preproData,target,test_size = 0.25) model = Lasso(alpha=0.1) model.fit(XTrain,yTrain) ``` We can observe the regressor $w$ provided by the model, notice the sparsity. ``` model.coef_ ``` We can observe which coefficients are put to $0$ and which ones are positively/negatively correlated. ``` print("Value Feature") for idx,val in enumerate(model.coef_): print("{:6.3f} {}".format(val,preproData.columns[idx])) ``` Let us take a look at our predictions. ``` targetPred = model.predict(XTest) print("Predicted True") for idx,val in enumerate(targetPred): print("{:4.1f} {:.0f}".format(val,float(yTest.iloc[idx]))) ``` ### Regularization path Selecting a good parameter $\alpha$ is the role of the data scientist. For instance, a easy way to do is the following. ``` n_test = 15 alpha_tab = np.logspace(-10,1,base=2,num = n_test) print(alpha_tab) trainError = np.zeros(n_test) testError = np.zeros(n_test) featureNum = np.zeros(n_test) for idx,alpha in enumerate(alpha_tab): model = Lasso(alpha=alpha) model.fit(XTrain,yTrain) yPredTrain = model.predict(XTrain) yPredTest = model.predict(XTest) trainError[idx] = np.linalg.norm(yPredTrain-yTrain["G3"].values)/yTrain.count() testError[idx] = np.linalg.norm(yPredTest-yTest["G3"].values)/yTest.count() featureNum[idx] = sum(model.coef_!=0) alpha_opt = alpha_tab[np.argmin(testError)] import matplotlib.pyplot as plt import seaborn as sns sns.set() %matplotlib inline plt.subplot(311) plt.xscale("log") plt.plot(alpha_tab, trainError,label="train error") plt.xlim([min(alpha_tab),max(alpha_tab)]) plt.legend() plt.xticks([]) plt.axvline(x=alpha_opt) plt.ylabel("error") plt.subplot(312) plt.xscale("log") plt.plot(alpha_tab, testError,'r',label="test error") plt.xlim([min(alpha_tab),max(alpha_tab)]) #plt.ylim([0.19, 0.21]) plt.legend() plt.axvline(x=alpha_opt) plt.xticks([]) plt.ylabel("error") plt.subplot(313) plt.xscale("log") plt.scatter(alpha_tab, 
featureNum) plt.xlim([min(alpha_tab),max(alpha_tab)]) plt.ylim([0,28]) plt.axvline(x=alpha_opt) plt.ylabel("nb. of features") plt.xlabel("alpha") ``` ## Exercises > **Exercise:** a very popular binary classification exercise is the [survival prediction from Titanic shipwreck on Kaggle](https://www.kaggle.com/c/titanic) > > *The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.* > > *One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.* > > *In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.* > > > The data, taken from [Kaggle](https://www.kaggle.com/c/titanic) is located in `data/titanic/train.csv` and has the following form: <table> <tbody> <tr><th><b>Feature</b></th><th><b>Definition</b></th><th><b>Comment</b></th></tr> <tr> <td>PassengerId</td> <td>ID</td> <td>numeric</td> </tr> <tr> <td>Survival</td> <td>Survival of the passenger</td> <td>0 = No, 1 = Yes <b>target to predict</b></td> </tr> <tr> <td>Pclass</td> <td>Ticket class</td> <td>1 = 1st, 2 = 2nd, 3 = 3rd</td> </tr> <tr> <td>Name</td> <td>Full name w/ Mr. Mrs. 
etc.</td> <td>string</td> </tr> <tr> <td>Sex</td> <td>Sex</td> <td><tt>male</tt> or <tt>female</tt></td> </tr> <tr> <td>Age</td> <td>Age in years</td> <td>numeric</td> </tr> <tr> <td>SibSp</td> <td># of siblings / spouses aboard the Titanic</td> <td>numeric</td> </tr> <tr> <td>Parch</td> <td># of parents / children aboard the Titanic</td> <td></td> </tr> <tr> <td>Ticket</td> <td>Ticket number</td> <td>quite messy</td> </tr> <tr> <td>Fare</td> <td>Passenger fare</td> <td></td> </tr> <tr> <td>cabin</td> <td>Cabin number</td> <td>letter + number (e.g. C85), often missing</td> </tr> <tr> <td>Embarked</td> <td>Port of Embarkation</td> <td>C = Cherbourg, Q = Queenstown, S = Southampton</td> </tr> </tbody> </table> > * Load the dataset and preprocess the features. (you can remove features that seem uninteresting to you). > * Perform binary classification to predict the survival of a passenger depending on its information. > * Validate you method on the test set `data/titanic/test.csv` > * Perform some feature engineering to improve the performance of you classifier (see e.g. https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/). > **Exercise:** [House price prediction in Ames, Iowa on Kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques) > > The data, taken from [Kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques), is located in `data/house_prices/train.csv`. > > * Try to reach the best accurracy in terms of mean absolute error on the log of the prices: $$Error = \frac{1}{n} \sum_{i=1}^n | \log(predicted_i) - \log(true_i) |$$ > on the test set `data/house_prices/test.csv`. > * Which features (original or made up) are the most relevant? (see `data/house_prices/data_description.txt`)
github_jupyter
``` ''' Two Inputs : (i) a directory containing all images, (ii) scenic scenario script Output : a list of image_file names that belongs to the scenario''' import os import scenic from scenic.simulators.gta.nusc_query_api import NuscQueryAPI directory = "/Users/edwardkim/Desktop/nuScenes_data/samples/CAM_FRONT" scenic_script_path = 'examples/gta/test_boston_seaport.sc' # image_filename = [file for file in os.listdir(directory) if file.endswith('.jpg')] # extract image labels query = NuscQueryAPI(version = 'v1.0-trainval', dataroot='/Users/edwardkim/Desktop/nuScenes_data') map_name = 'boston-seaport' def deg_to_rad(degrees): return (degrees + 360) % 360 # <---- need to consider NuScenes reference angle to Scenic's reference angle NUM_CAR = 2 # excluding ego # construct scenario scenario = scenic.scenarioFromFile(scenic_script_path) from scenic.core.scenarios import Scene from scenic.core.vectors import Vector from scenic.core.distributions import Constant import math NUM_CAR = 1 # excluding ego imgs_in_scenario = [] # for loop starts here over all images scenario_copy = scenario obj_list = scenario_copy.dependencies img_filename = 'n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151603512404.jpg' label = query.get_img_data(img_filename) if query.img_filename_to_location[img_filename] == map_name: # skip if the num car condition not met if (len(label['Vehicles']) >= NUM_CAR): scenario_deps = [dep for dep in obj_list] # extract ego's NuScenes label values egoCar = label['EgoCar'] ego_position = egoCar['position'] ego_heading = egoCar['heading'] # condition ego obj's position and heading to NuScene's label values x_ego = scenario.egoObject x_ego.position.conditionTo(Vector(ego_position[0], ego_position[1])) x_ego.heading.conditionTo(Constant(ego_heading - math.pi/2)) scenario_copy.egoObject = x_ego scenario_deps[0] = x_ego # extract NuScenes' non-ego car's labels otherVehicles = label['Vehicles'] for i in range(NUM_CAR): scenic_car = scenario_deps[i+1] # +1 added to skip 
ego car nuscenes_car = otherVehicles[i] nusc_car_position = nuscenes_car['position'] nusc_car_heading = nuscenes_car['heading'] # condition on nuscenes' position and heading values scenic_car.position.conditionTo(Vector(nusc_car_position[0], nusc_car_position[1])) scenic_car.heading.conditionTo(Constant(nusc_car_heading - math.pi/2)) # save the conditioned scenic car_obj scenario_deps[i+1] = scenic_car scenario_copy.dependencies = tuple(scenario_deps) scene = scenario_copy.generate(maxIterations = 1) if isinstance(scene, Scene): print("in scenario: ", img_filename) print("NuScenes label: ", label) cfg = scenic.simulators.gta.interface.GTA.Config(scene) print("scenic sample: ") print(cfg.vehicles) print(cfg.time) print(cfg.weather) print(cfg.location) print(cfg.view_heading) imgs_in_scenario.append(img_filename) m = query.get_whole_map('boston-seaport') print(m) label = query.get_img_data('n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151603512404.jpg') print(label) ```
github_jupyter
# Evolutionary Grammar Fuzzing In this chapter, we introduce how to implement [search-based test generation](SearchBasedFuzzing.ipynb) on grammars, using _genetic improvement_ operators such as mutation and cross-over on derivation trees. **Prerequisites** * You should have read the [chapter on search-based test generation](SearchBasedFuzzer.ipynb). * You should have read the [chapter on recombining inputs](LangFuzzer.ipynb). ``` import fuzzingbook_utils import SearchBasedFuzzer import LangFuzzer ``` ## Grammar-Based Mutation General idea: Take a derivation tree and a matching grammar; apply a random mutation. ``` from Grammars import EXPR_GRAMMAR from GrammarFuzzer import display_tree from Parser import EarleyParser parser = EarleyParser(EXPR_GRAMMAR) tree, *_ = parser.parse("1 + 2 * 3") display_tree(tree) ``` 1. Pick any node in the tree 2. Produce a new expansion. We have seen this for `LangFuzzer` already, right? How about we factor this out (from the Parser notebook), and have two notebook on mutational (and genetic fuzzing): 1. `LangFuzzer` – a chapter on * Mutating trees (randomly) * Mutating trees from a given population (the LangFuzz approach) * Tree recombination (and crossover) 2. 
`EvoGrammarFuzzer` – a chapter on * Genetic improvement (using coverage only) * Genetic improvement (using a fitness function from search-based fuzzing) ``` def mutate_tree(tree, grammar): pass ``` ## Grammar-Based Crossover ## Lessons Learned * _Lesson one_ * _Lesson two_ * _Lesson three_ ## Next Steps _Link to subsequent chapters (notebooks) here, as in:_ * [use _mutations_ on existing inputs to get more valid inputs](MutationFuzzer.ipynb) * [use _grammars_ (i.e., a specification of the input format) to get even more valid inputs](Grammars.ipynb) * [reduce _failing inputs_ for efficient debugging](Reducer.ipynb) ## Background _Cite relevant works in the literature and put them into context, as in:_ The idea of ensuring that each expansion in the grammar is used at least once goes back to Burkhardt \cite{Burkhardt1967}, to be later rediscovered by Paul Purdom \cite{Purdom1972}. ## Exercises _Close the chapter with a few exercises such that people have things to do. To make the solutions hidden (to be revealed by the user), have them start with_ ```markdown **Solution.** ``` _Your solution can then extend up to the next title (i.e., any markdown cell starting with `#`)._ _Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution._ ### Exercise 1: _Title_ _Text of the exercise_ ``` # Some code that is part of the exercise pass ``` _Some more text for the exercise_ **Solution.** _Some text for the solution_ ``` # Some code for the solution 2 + 2 ``` _Some more text for the solution_ ### Exercise 2: _Title_ _Text of the exercise_ **Solution.** _Solution for the exercise_
github_jupyter
# Human Brain samples - MS Nature 2019 Rowitch dataset reprocessed ## Please download the input data before proceed Please extract the tarball to current working directory, input data would be in **./data** **Download link https://bit.ly/2F6o5n7** ``` import scanpy as sc import numpy as np import scipy as sp import pandas as pd import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib import colors import seaborn as sb import glob import rpy2.rinterface_lib.callbacks import logging from rpy2.robjects import pandas2ri import anndata2ri from scipy.sparse.csc import csc_matrix # Ignore R warning messages #Note: this can be commented out to get more verbose R output rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Automatically convert rpy2 outputs to pandas dataframes pandas2ri.activate() anndata2ri.activate() %load_ext rpy2.ipython plt.rcParams['figure.figsize']=(8,8) #rescale figures sc.settings.verbosity = 3 #sc.set_figure_params(dpi=200, dpi_save=300) sc.logging.print_versions() results_file = './write/ms_nature_2019_rowitch_pp.h5ad' ``` ## Load human brain snRNAseq samples Here we load the pre-processed datasets (which has been annotated), and the raw matrices (which won't be filtered on the gene level). 
### Raw data ``` wpath = "./data/" metafile = "all_samples.txt" meta = pd.read_csv( wpath + "/" + metafile, header = 0) meta # design # Set up data loading file_base = './data/' adatas_raw = [] # Loop to load data for i in range(len(meta['library_id'])): #Parse filenames sample = meta['library_id'][i] h5_file = file_base+sample+'/outs/filtered_feature_bc_matrix.h5' #Load data adata_tmp = sc.read_10x_h5(h5_file) adata_tmp.X = csc_matrix(adata_tmp.X) #Annotate data sampleID = sample.split('-rxn')[0] adata_tmp.obs['sample'] = ['MSsnRNAseq2019_'+sample]*adata_tmp.n_obs # adata_tmp.obs['study'] = ['MS_Nature_2019_Rowitch_snRNAseq']*adata_tmp.n_obs # adata_tmp.obs['chemistry'] = ['v2_10X']*adata_tmp.n_obs # adata_tmp.obs['tissue'] = ['Brain']*adata_tmp.n_obs # adata_tmp.obs['species'] = ['Human']*adata_tmp.n_obs # adata_tmp.obs['data_type'] = ['UMI']*adata_tmp.n_obs # adata_tmp.obs adata_tmp.var_names_make_unique() #Append to main adata object adatas_raw.append(adata_tmp) meta['sample_id'] = meta['library_id'].copy() meta['sample_id'] = meta['sample_id'].str.replace("_3PEE_ref", "") meta meta.shape # Concatenate to unique adata object adata_raw = adatas_raw[0].concatenate(adatas_raw[1:], batch_key='sample_ID', batch_categories=meta['sample_id']) adata_raw.obs['sample'] = adata_raw.obs['sample'].str.replace("_3PEE_ref", "") adata_raw.obs.head() adata_raw.obs.drop(columns=['sample_ID'], inplace=True) adata_raw.obs.head() adata_raw.obs.index.rename('barcode', inplace=True) adata_raw.obs.head() adata_raw.shape type(adata_raw.X) # adata_raw.X = csc_matrix(adata_raw.X) # Save merged object adata_raw.write(results_file) ``` # 1. 
Pre-processing and visualization ## 1.1 Quality control ``` adata_raw_copy = adata_raw.copy() sc.pp.calculate_qc_metrics(adata_raw, inplace=True) # Quality control - calculate QC covariates adata_raw.obs['n_counts'] = adata_raw.X.sum(1) adata_raw.obs['log_counts'] = np.log(adata_raw.obs['n_counts']) adata_raw.obs['n_genes'] = (adata_raw.X > 0).sum(1) # mt_gene_mask = [gene.startswith('MT-') for gene in adata_raw.var_names] # adata_raw.obs['mt_frac'] = adata_raw.X[:, mt_gene_mask].sum(1)/adata_raw.obs['n_counts'] mito_genes = adata_raw.var_names.str.startswith('MT-') adata_raw.obs['mt_frac'] = np.sum(adata_raw[:, mito_genes].X, axis=1) / np.sum(adata_raw.X, axis=1) # Quality control - plot QC metrics sc.pl.violin(adata_raw, ['n_genes', 'n_counts', 'mt_frac'],groupby='sample', jitter=0.4, multi_panel=False) sc.pl.scatter(adata_raw, x='n_counts', y='mt_frac') sc.pl.scatter(adata_raw, x='n_counts', y='n_genes', color='mt_frac') sc.pl.scatter(adata_raw[adata_raw.obs['n_counts'] < 20000], x='n_counts', y='n_genes', color='mt_frac') #Thresholding decision: counts p3 = sb.distplot(adata_raw.obs['n_counts'], kde=False, bins=200) plt.show() p4 = sb.distplot(adata_raw.obs['n_counts'][adata_raw.obs['n_counts']<4000], kde=False,bins=200) plt.show() p5 = sb.distplot(adata_raw.obs['n_counts'][adata_raw.obs['n_counts']>25000], kde=False, bins=60) plt.show() ``` Zoom-in histograms of the number of counts per cell show that there's a group of cells with n_counts < 3500, this would remove 47k out of 65k cells. But paper said cut at 1000 reads, stick with 1000 reads. On the upper end of the distribution, we can see that the high peak centered around 5000 counts spans until around 40000 counts. 
``` # Filter cells according to identified QC thresholds: print('Total number of cells: {:d}'.format(adata_raw.n_obs)) sc.pp.filter_cells(adata_raw, min_counts = 1000) print('Number of cells after min count filter: {:d}'.format(adata_raw.n_obs)) sc.pp.filter_cells(adata_raw, max_counts = 40000) print('Number of cells after max count filter: {:d}'.format(adata_raw.n_obs)) adata_raw = adata_raw[adata_raw.obs['mt_frac'] < 0.2] print('Number of cells after MT filter: {:d}'.format(adata_raw.n_obs)) # look at the effect of thresholding sc.pl.scatter(adata_raw, x='n_counts', y='n_genes', color='mt_frac') #Thresholding decision: genes p6 = sb.distplot(adata_raw.obs['n_genes'], kde=False, bins=60) plt.show() p7 = sb.distplot(adata_raw.obs['n_genes'][adata_raw.obs['n_genes']<1500], kde=False, bins=60) plt.show() ``` From the histograms of the number of genes per cell, we can notice that there still is a small population showing n_genes < 600 which should be filtered out. But paper said 500 ``` # Thresholding on number of genes print('Total number of cells: {:d}'.format(adata_raw.n_obs)) sc.pp.filter_cells(adata_raw, min_genes = 600) print('Number of cells after gene filter: {:d}'.format(adata_raw.n_obs)) #Filter genes: print('Total number of genes: {:d}'.format(adata_raw.n_vars)) # Min 20 cells - filters out 0 count genes sc.pp.filter_genes(adata_raw, min_cells=20) print('Number of genes after cell filter: {:d}'.format(adata_raw.n_vars)) # Save merged object adata_raw.write('./write/ms_nature_2019_rowitch_done_QC_filter_46kcell_25kgene.h5ad') ``` ### Normalization ``` adata_raw = sc.read_h5ad('./write/ms_nature_2019_rowitch_done_QC_filter_46kcell_25kgene.h5ad') sc.pp.normalize_per_cell(adata_raw, counts_per_cell_after=1e6) sc.pp.log1p(adata_raw) # sc.pp.pca(adata_pp, n_comps=15, svd_solver='arpack') # sc.pp.neighbors(adata_pp) # sc.tl.louvain(adata_pp, key_added='groups', resolution=0.5) 
adata_raw.write('./write/ms_nature_2019_rowitch_filtered_normalized_log1p_non_scaled.h5ad') import gc gc.collect() infile = './write/ms_nature_2019_rowitch_filtered_normalized_log1p_non_scaled.h5ad' adata_raw = sc.read_h5ad(infile) def mod_index(meta): meta['index'] = meta['index'].str.replace("_3PEE_ref", "") return meta # attach exisiting harmony and liger coordinates # harmony adata_harmony = sc.read_h5ad("./data/harmony_clustered.h5ad") adata_harmony.obs.index = adata_harmony.obs.index.str.replace("_3PEE_ref", "") adata_harmony.obs # subset adata_raw to match same cells cells = list(set(adata_raw.obs.index) & set(adata_harmony.obs.index)) adata_raw = adata_raw[cells] xpca = pd.DataFrame(adata_harmony.obsm['X_pca']).set_index(adata_harmony.obs.index) xtsne = pd.DataFrame(adata_harmony.obsm['X_tsne']).set_index(adata_harmony.obs.index) xumap = pd.DataFrame(adata_harmony.obsm['X_umap']).set_index(adata_harmony.obs.index) adata_raw.obsm['X_pca_harmony'] = np.array(xpca.loc[adata_raw.obs.index]) adata_raw.obsm['X_tsne_harmony'] = np.array(xtsne.loc[adata_raw.obs.index]) adata_raw.obsm['X_umap_harmony'] = np.array(xumap.loc[adata_raw.obs.index]) adata_raw.obs['louvain_harmony'] = adata_harmony.obs['louvain'].loc[adata_raw.obs.index] adata_raw.obs = adata_raw.obs.astype({'louvain_harmony':'category'}) # liger xtsne = pd.read_csv("./data/liger_runumap.tsne.coords.txt", sep='\t', encoding='utf-8') xumap = pd.read_csv("./data/liger_runumap.umap.coords.txt", sep='\t', encoding='utf-8') xlouvain = pd.read_csv("./data/liger_clusterID.txt", sep='\t', encoding='utf-8') xtsne = mod_index(xtsne) xumap = mod_index(xumap) xlouvain['index'] = xlouvain['barcode'] xlouvain = mod_index(xlouvain) xumap.set_index('index', inplace=True) xtsne.set_index('index', inplace=True) xlouvain.set_index('index', inplace=True) adata_raw.obsm['X_tsne_liger'] = np.array(xtsne.loc[adata_raw.obs.index]) adata_raw.obsm['X_umap_liger'] = np.array(xumap.loc[adata_raw.obs.index]) 
adata_raw.obs['louvain_liger'] = np.array(xlouvain.loc[adata_raw.obs.index]['clusterID']) adata_raw.obs = adata_raw.obs.astype({'louvain_liger':'category'}) outfile = infile outfile = outfile.replace(".h5ad","") adata_raw.write_h5ad(outfile+"_with_embedings.h5ad") import gc gc.collect() ``` # attach meta data from the paper ``` xmeta = pd.read_csv("./data/meta.tsv", sep='\t', encoding='utf-8') xmeta.index = xmeta['cell'].str.replace("_.*_.*","")+"-"+xmeta['sample']+"_10x" xmeta xmeta.loc[set(set(xmeta.index) & set(adata_raw.obs.index))][['Capbatch','Seqbatch','cell_type','diagnosis','region','sample','sex','stage']] features = ['Capbatch','Seqbatch','cell_type','diagnosis','region','sample','sex','stage'] bcodes = set(set(xmeta.index) & set(adata_raw.obs.index)) for f in features: adata_raw.obs[f] = 'nan' adata_raw.obs[f].loc[bcodes] = xmeta[f].loc[bcodes] set(adata_raw.obs['cell_type']) adata_raw.obs['>Description'] = ['Human brain snRNAseq 46k cells (MS Nature 2019 Schirmer et al.); data - normalized, log transformed UMI; platform - 10X v2 chemistry | embedding by umap_harmony; color by cell_type']*adata_raw.n_obs outfile = infile outfile = outfile.replace(".h5ad","") adata_raw.write_h5ad(outfile+"_with_embedings_and_labels.h5ad") ```
github_jupyter
# Mimblewimble ## Resources: ### Software: - Get rust at: [www.rust-lang.org](https://www.rust-lang.org) - Get jupyter notebook directly at [jupyter.org](https://www.jupyter.org) or through anaconda distribution at [anaconda.com](https://www.anaconda.com) - get rust jupyter kernel at [https://github.com/google/evcxr/blob/master/evcxr_jupyter/README.md](https://github.com/google/evcxr/blob/master/evcxr_jupyter/README.md) or run the code normally ### Mimblewimble - "Official" mimblewimble implementation [https://github.com/mimblewimble/grin/blob/master/doc/intro.md](https://github.com/mimblewimble/grin/blob/master/doc/intro.md) - Helpful article expleining mimblewimble [https://medium.com/@brandonarvanaghi/grin-transactions-explained-step-by-step-fdceb905a853](https://medium.com/@brandonarvanaghi/grin-transactions-explained-step-by-step-fdceb905a853) - Aggregate schnorr signatures [https://blockstream.com/2018/01/23/en-musig-key-aggregation-schnorr-signatures/](https://blockstream.com/2018/01/23/en-musig-key-aggregation-schnorr-signatures/) # Mimblewimble History In __2013__ Adam Back proposes confidential transactions in his bitcointalk post "bitcoins with homomorphic value" [https://bitcointalk.org/index.php?topic=305791.0](https://bitcointalk.org/index.php?topic=305791.0) In __Aug. 2016__, Someone called Tom Elvis Jedusor (Voldemort's French name in J.K. Rowling's Harry Potter book series) placed the original MimbleWimble white paper on a bitcoin research channel, and then disappeared. Tom's white paper "Mimblewimble" (a tongue-tying curse used in "The Deathly Hallows") was a blockchain proposal that could theoretically increase privacy, scalability and fungibility. In __Oct. 2016__, Andrew Poelstra, a mathematician at Blockstream, wrote a precise paper, made precise Tom's original idea, and added further scaling improvements on it. 
A __few days later__, Ignotus Peverell (name also came from "Harry Potter", the original owner of the invisibility cloak, if you know the Harry Potter characters) started a Github project called Grin, and began turning the MimbleWimble paper into something real. And in __Mar. 2017__, Ignotus Peverell posted a technical introduction to MimbleWimble and Grin on Github. # Mimblewimble deepdive ``` :dep curve25519-dalek = "1.1.3" rand = "0.6.5" sha2 = "0.8.0" extern crate curve25519_dalek; extern crate rand; extern crate sha2; use curve25519_dalek::constants; use curve25519_dalek::ristretto::CompressedRistretto; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use rand::prelude::*; use sha2::{Sha256, Digest}; let mut rng = rand::thread_rng(); ``` ## Discrete logarithm problem ![11hrclock](img/11hrclock.png) - given the Generator _G = 3_ and the point _P = 2_ (publlic key) it is extremely difficult (assuming large numbers) to get the multiplicator _r_ (private key) that satisfies <div align="center">_P = r*G_</div> - however, knowing _r_ it is easy to compute _P_ ## Schnorr signatures - private key _r_, public key _U_ with <div align="center">_U = r*G_</div> - signer generates random nonce _rt_ and computes commitment to nonce <div align="center">_Ut = rt*G_</div> - using challenge _c=H(m,Ut)_ (challenge has to be unique for message _m_ and nonce _rt_) signer computes <div align="center">_rz = rt + c*r_</div> - signer sends _(Ut,rz)_ to verifier - verifier checks <div align="center">_rz\*G = Ut + c\*U_</div> - which can be expressed as <div align="center">_rz\*G = rt\*G + c\*r\*G_</div> ``` //get generator for the elliptic curve points let G = &constants::RISTRETTO_BASEPOINT_POINT; //pick arbitrary private key let r = Scalar::from_bytes_mod_order([2u8;32]); //compute public key let U = r*G; //generate random nonce, has to be different every time let mut temp: [u8;32] = [0u8;32]; temp.copy_from_slice((0..32).map(|x| 
rng.gen()).collect::<Vec<u8>>().as_slice()); let rt = Scalar::from_bytes_mod_order(temp); //calculate commitment to nonce let Ut = rt*G; //generate challenge from hashfunction let mut hasher = Sha256::new(); hasher.input("message".as_bytes()); hasher.input(Ut.compress().to_bytes()); temp.copy_from_slice(hasher.result().as_slice()); let c = Scalar::from_bytes_mod_order(temp); let rz = rt + c*r; //check whether signature is valid assert_eq!(rz*G,Ut+c*U); (rz*G).compress() ``` ## Simple aggregate schnorr signatures (insecure!!!) - two signers with private keys _r1,r2_ and public keys _U1,U2_ with <div align="center">_U1 = r1\*G,&nbsp; &nbsp; U2 = r2\*G_</div> - signers generate random nonces _rt1,rt2_ and compute commitments to the nonces <div align="center">_Ut1 = rt1\*G,&nbsp; &nbsp; Ut2 = rt2\*G_</div> - using challenge _c=H(m,Ut1+Ut2,U1+U2)_ (this is insecure!!!, see secure version [here](https://blockstream.com/2018/01/23/en-musig-key-aggregation-schnorr-signatures/)) signers compute <div align="center">_rz1 = rt1 + c\*r1,&nbsp; &nbsp; rz2 = rt2 + c\*r2 _</div> - signers send _(Ut1,rz1),(Ut2,rz2)_ to verifier - verifier checks <div align="center">_rz\*G = Ut + c\*U_</div> <div align="center">_(rz1 + rz2)\*G = (Ut1 + Ut2) + c\*(U1 + U2)_</div> - aggregate signatures allow to simply add puplic keys and signatures <div align="center">_U = U1 + U2_</div> <div align="center">_(Ut,rz) = (Ut1 + Ut2, rz1 + rz2)_</div> ``` //pick arbitrary private keys let r1 = Scalar::from_bytes_mod_order([3u8;32]); let r2 = Scalar::from_bytes_mod_order([4u8;32]); //compute public key let U1 = r1*G; let U2 = r2*G; //generate random nonces, has to be different every time let mut temp: [u8;32] = [0u8;32]; temp.copy_from_slice((0..32).map(|x| rng.gen()).collect::<Vec<u8>>().as_slice()); let rt1 = Scalar::from_bytes_mod_order(temp); let mut temp: [u8;32] = [0u8;32]; temp.copy_from_slice((0..32).map(|x| rng.gen()).collect::<Vec<u8>>().as_slice()); let rt2 = Scalar::from_bytes_mod_order(temp); 
//calculate commitment to nonce let Ut1 = rt1*G; let Ut2 = rt2*G; //generate challenge from hashfunction let mut hasher = Sha256::new(); hasher.input("message".as_bytes()); hasher.input((Ut1+Ut2).compress().to_bytes()); hasher.input((U1+U2).compress().to_bytes()); temp.copy_from_slice(hasher.result().as_slice()); let c = Scalar::from_bytes_mod_order(temp); let rz1 = rt1 + c*r1; let rz2 = rt2 + c*r2; let U = U1 + U2; let rz = rz1 + rz2; let Ut = Ut1 + Ut2; //check whether signature is valid assert_eq!(rz*G,Ut+c*U); (rz*G).compress() ``` ## UTXO transactions ![img](img/utxo.png) ## Example transaction - Alice has 100 tokens and wants to pay Bob 60 - with the UTXO model Alice will use her input _vi0 = 100_ to pay herself the output _vo0 = 40_ and Bob the output _vo1 = 60_ - no transactions fees apply - in order to not generate money out of nothing, the inputs must equal the ouputs <div align="center">_vi0 = vo0 + vo1_</div> ``` let zeros: [u8;32] = [0u8;32]; let mut vi0 = zeros.clone(); vi0[0] = 100u8; let vi0 = Scalar::from_bytes_mod_order(vi0); let mut vo0 = zeros.clone(); vo0[0] = 40u8; let vo0 = Scalar::from_bytes_mod_order(vo0); let mut vo1 = zeros.clone(); vo1[0] = 60u8; let vo1 = Scalar::from_bytes_mod_order(vo1); //check whether input equals output assert_eq!(vi0,vo0+vo1); vi0 ``` ## Hiding - in order to obscure the values of the transaction, one can multiply every term by the point _H_ on an elliptic curve, this yields <div align="center">_vi0\*H = vo0\* H + vo1\*H_</div> - similar to the dlog problem, for people not knowing _vi0, vo0, vo1_ it is almost impossible to obtain them now - however, the inputs must still equal the outputs ``` //get point on the curve, it is important that the relation between G and H is unknown let H = RistrettoPoint::random(&mut rng); assert_eq!(vi0*H,vo0*H+vo1*H); (vi0*H).compress() ``` ## Blinding - the problem now is that, the people that transacted with you know the value of the transactions values and it gets easy for them to 
deduce your following transactions (if they know you have 100, they can try every combination below 100 to see what you spend on your next output) - the aim is to replace every input and output by its corresponding pedersen commitment <div align="center">_v\*H -> r\*G + v\*H_</div> - where _r_ is called blinding factor and _G_ is another point on the curve - every input and output has its own blinding factor - in the context of mimblewimble _r_ can be thought of as a private key to the corresponding output and it is only known by the owner of that output ## Main idea: - each participant uses the sum of his pedersen commitments for the outputs minus the sum of the pedersen commitments for the inputs as his public key <div align="center">_U1 = (ro0\*G + vo0\*H) - (ri0\*G + vi0\*H)_</div> <div align="center">_U2 = (ro1\*G + vo1\*H)_</div> - the private key for each participant is then the sum of the blinding factors of the outputs minus the inputs <div align="center">_r1 = (ro0 - ri0)_</div> <div align="center">_r2 = ro1_</div> ## Validating transactions - public key for sender is sum of pedersen commitments (output - input) <div align="center">_U1 = (ro0 - ri0)\*G + (vo0 - vi0)\*H_</div> - public key of receiver is sum of pedersen commitments (output - input) <div align="center">_U2 = ro1\*G + vo1\*H_</div> - both generate random nonces _rt1,rt2_ and compute commitments to the nonces <div align="center">_Ut1 = rt1\*G,&nbsp; &nbsp; Ut2 = rt2\*G_</div> - using challenge _c=H(m,Ut1+Ut2,U1+U2)_ signers compute <div align="center">_rz1 = rt1 + c\*(ro0 - ri0),&nbsp; &nbsp; rz2 = rt2 + c\*ro1 _</div> - signers send _(Ut1,rz1),(Ut2,rz2)_ to verifier - verifier checks <div align="center">_(rz1 + rz2)\*G = (Ut1 + Ut2) + c\*(U1 + U2)_</div> - which is equal to <div align="center">_(rz1 + rz2)\*G = (Ut1 + Ut2) + c\*((ro0 - ri0)\*G + (vo0 - vi0)\*H + ro1\*G + vo1\*H)_</div> - if the following condition holds <div align="center">_0 = vo0\*H - vi0\*H + vo1\*H_</div> - this can be
simplified to the valid aggregate schnorr signature <div align="center">_(rz1 + rz2)\*G = (rt1\*G + rt2\*G) + c\*((ro0 - ri0)\*G + ro1\*G)_</div> - vice versa, a valid signature means that the inputs and outputs cancel out ``` //initialize the blinding factors let mut ri0 = zeros.clone(); ri0[0] = 10u8; let ri0 = Scalar::from_bytes_mod_order(ri0); let mut ro0 = zeros.clone(); ro0[0] = 20u8; let ro0 = Scalar::from_bytes_mod_order(ro0); let mut ro1 = zeros.clone(); ro1[0] = 30u8; let ro1 = Scalar::from_bytes_mod_order(ro1); //compute public key let U1 = (ro0 - ri0)*G + (vo0 - vi0)*H; let U2 = ro1*G + vo1*H; //generate random nonces, has to be different every time let mut temp: [u8;32] = [0u8;32]; temp.copy_from_slice((0..32).map(|x| rng.gen()).collect::<Vec<u8>>().as_slice()); let rt1 = Scalar::from_bytes_mod_order(temp); let mut temp: [u8;32] = [0u8;32]; temp.copy_from_slice((0..32).map(|x| rng.gen()).collect::<Vec<u8>>().as_slice()); let rt2 = Scalar::from_bytes_mod_order(temp); //calculate commitment to nonce let Ut1 = rt1*G; let Ut2 = rt2*G; //generate challenge from hashfunction let mut hasher = Sha256::new(); hasher.input("message".as_bytes()); hasher.input((Ut1+Ut2).compress().to_bytes()); hasher.input((U1+U2).compress().to_bytes()); temp.copy_from_slice(hasher.result().as_slice()); let c = Scalar::from_bytes_mod_order(temp); let rz1 = rt1 + c*(ro0 - ri0); let rz2 = rt2 + c*ro1; let U = U1 + U2; let rz = rz1 + rz2; let Ut = Ut1 + Ut2; //check whether signature is valid assert_eq!(rz*G,Ut+c*U); (rz*G).compress() ```
github_jupyter
``` import networkx as nx import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline def golden_figsize(height): """ Assuming height dimension is the shorter one, the width should be: (1 + 5**0.5)/2 This function thus returns the (width, height) tuple which is computed to be in a golden ratio. """ width = height * (1 + 5**0.5) / 2 return (width, height) G = nx.read_gpickle('20141103 All IRD Final Graph.pkl') # Add in ecotype information # Load the data df = pd.read_csv('Country-Species_coded.csv', index_col=0) df['Habitat/setting'] = df['Habitat/setting'].replace('Domestic ', 'Domestic') # Make it into a dicitonary where each entry is (country, species):habitat/setting ecotype_map = dict() for row, data in df.iterrows(): country = data['Country'] species = data['Species'] ecotype = data['Habitat/setting'] ecotype_map[(country, species)] = ecotype # Add the ecotype into each node. for n, d in G.nodes(data=True): country = d['country'] species = d['host_species'] if '/' in species: species = species.split('/')[0] if 'null' in species: species = 'Unknown' G.node[n]['ecotype'] = ecotype_map[(country, species)] def correct_metadata(G, node): """ Helper function for providing the correct subtype graph metadata. """ d = G.node[node] subtype = d['subtype'] ecotype = d['ecotype'] host = d['host_species'] if ecotype == 'Unknown': ecotype = 'W' if host == 'Human': ecotype = 'H' if ecotype == 'Domestic': ecotype = 'D' if ecotype == 'Wild': ecotype = 'W' subtype = subtype + '_' + ecotype return ecotype, subtype, host from collections import Counter tcounter = Counter() # Plot the number of ecotype jumps detected. 
for sc, sk, d in G.edges(data=True): if d['edge_type'] == 'reassortant': sc_ecotype, sc_subtype, sc_host = correct_metadata(G, sc) sk_ecotype, sk_subtype, sk_host = correct_metadata(G, sk) transition = sc_ecotype + '::' + sk_ecotype tcounter[transition] += 1 tcounter # Plot the data as a bar graph, using the same order as in the Intersubtype Interactions at Ecotype Interfaces """ Order desired: 1. Wild->Wild 2. Wild->Domestic 3. Domestic->Domestic 4. Domestic->Wild 5. Human->Domestic 6. Human->Human 7. Wild->Human 8. Domestic->Human 9. Human->Wild """ data_order = ['W::W', 'W::D', 'D::D', 'D::W', 'H::D', 'H::H', 'W::H', 'D::H', 'H::W'] bar_heights = [tcounter[i] for i in data_order] xs = [i for i, n in enumerate(bar_heights)] fig = plt.figure(figsize=(3, 1.5)) ax1 = fig.add_subplot(1,1,1) #background ax ax2 = fig.add_subplot(2,1,1) #top ax ax3 = fig.add_subplot(2,1,2) #bottom ax # Set unnecessary things to be invisible. ax1.xaxis.set_visible(False) ax1.yaxis.set_ticks([]) ax2.xaxis.set_visible(False) ax1.spines['left'].set_visible(False) ax1.spines['right'].set_visible(False) ax2.spines['bottom'].set_visible(False) ax3.spines['top'].set_visible(False) # Set plotting parameters ax3.set_xticks(np.arange(0, len(xs))) ax2.set_xticks(np.arange(0, len(xs))) ax3.xaxis.set_ticklabels(data_order, rotation=30, ha='right') ax2.xaxis.tick_top() ax3.xaxis.tick_bottom() # Set the y-limits on top and bottom plots ax2.set_ylim(300, 3601) ax2.set_yticks(np.arange(300, 3601, 900)) ax3.set_ylim(0, 250) ax3.set_yticks(np.arange(0, 251, 75)) # Plot the bar charts import matplotlib.colors as colors ax2.bar(xs, bar_heights, align='center', color=colors.hex2color('#AA4639')) ax3.bar(xs, bar_heights, align='center', color=colors.hex2color('#AA4639')) # Add in diagonal lines d = .015 # how big to make the diagonal lines in axes coordinates # arguments to pass plot, just so we don't keep repeating them kwargs = dict(transform=ax2.transAxes, color='k', clip_on=False) ax2.plot((-d,+d),(-d,+d), 
**kwargs) # bottom-left diagonal ax2.plot((1-d,1+d),(-d,+d), **kwargs) # bottom-right diagonal kwargs.update(transform=ax3.transAxes) # switch to the bottom axes ax3.plot((-d,+d),(1-d,1+d), **kwargs) # top-left diagonal ax3.plot((1-d,1+d),(1-d,1+d), **kwargs) # top-right diagonal ax1.set_ylabel('Num. Interactions\n\n\n') # ax2.set_title('Ecotype Interaction Counts') ax1.annotate('g.', xy=(0,1), xycoords='figure fraction', va='top', ha='left') plt.subplots_adjust(left=0.22, right=0.95, bottom=0.18, top=0.95) plt.savefig('Ecotype Interaction Counts.pdf') # Of interest are the reverse zoonosis events, where human ecotype jumps back into wild or domestic animal ecotype. # What's the list of such ecotype jumps? rz_events = [] # reverse zoonosis events rz_subtypes = [] # subtypes involved in reverse_zoonosis rz_hosts = [] # hosts involved in reverse zoonosis rz_dates = [] # time stamps involved in reverse zoonosis rz_timedeltas = [] # time deltas in reverse zoonosis for sc, sk, d in G.edges(data=True): sc_ecotype, sc_subtype, sc_host = correct_metadata(G, sc) sk_ecotype, sk_subtype, sk_host = correct_metadata(G, sk) sc_date = G.node[sc]['collection_date'] sk_date = G.node[sk]['collection_date'] if sc_ecotype == 'H' and sk_ecotype != 'H': rz_events.append((sc, sk)) rz_subtypes.append((sc_subtype, sk_subtype)) rz_hosts.append((sc_host, sk_host)) rz_dates.append((sc_date, sk_date)) rz_timedeltas.append(sk_date - sc_date) Counter(rz_hosts) plt.hist([i.days for i in rz_timedeltas]) Counter(rz_subtypes) def correct_ecotype(G, node): ecotype = G.node[node]['ecotype'] host = G.node[node]['host_species'] if ecotype in ['Unknown', 'unknown']: ecotype = 'Wild' if host == 'Human': ecotype = 'Human' return ecotype # Let's create a figure for this. The figure will be a 2" x 3" panel, highlighting only the reverse zoonosis events. # The nodes on the left will be wild animal isolates, and the nodes on the right will be domestic animal isolates. 
# Human nodes will take the center portion. # The nodes will be colored in the same way as on the hive plots: red for human, green for domestic animals, and blue for # wild animal. # The edges will be colored in the same way as on the hive plots: blue for different subtype interactions, and green for # # same subtype interactions. fig = plt.figure(figsize=(2,3)) ax = fig.add_subplot(111) nodes = dict() nodes['Wild'] = [] nodes['Human'] = [] nodes['Domestic'] = [] edges = dict() edges['Human-Wild'] = [] edges['Human-Domestic'] = [] for sc, sk, d in G.edges(data=True): sc_ecotype = correct_ecotype(G, sc) sk_ecotype = correct_ecotype(G, sk) if sc_ecotype == 'Human' and sk_ecotype != 'Human': if sc not in nodes[sc_ecotype]: nodes[sc_ecotype].append(sc) if sk not in nodes[sk_ecotype]: nodes[sk_ecotype].append(sk) ecotype_transition = '{0}-{1}'.format(sc_ecotype, sk_ecotype) edges[ecotype_transition].append((sc, sk)) # Sort the items by subtype for k, v in nodes.items(): nodes[k] = sorted(v, key=lambda x:G.node[x]['subtype']) # Scale factor scale = 2 maxnum = max([len(v) for k, v in nodes.items()]) node_radius = 1 print(maxnum) # Wild goes on top ax.set_xlim(-12*node_radius, maxnum + node_radius + 10) ax.set_ylim(-12*node_radius, maxnum + node_radius + 10) for i, node in enumerate(nodes['Wild']): xpos = float(i + 1) ypos = maxnum - node_radius c = plt.Circle(xy=(xpos, ypos), radius=node_radius, color='blue') ax.add_patch(c) # Next comes Humans for i, node in enumerate(nodes['Human']): xpos = float(i + 1) ypos = maxnum * 0.5 c = plt.Circle(xy=(xpos, ypos), radius=node_radius, color='red') ax.add_patch(c) # Finally comes Domestic animals for i, node in enumerate(nodes['Domestic']): xpos = float(i + 1) ypos = node_radius c = plt.Circle(xy=(xpos, ypos), radius=node_radius, color='green') ax.add_patch(c) # Plot each edge as a straight line. 
for tset, edgelist in edges.items(): if tset == 'Human-Wild': ys = [maxnum*0.5, maxnum-node_radius] if tset == 'Human-Domestic': ys = [maxnum*0.5, node_radius] for edge in edgelist: sc, sk = edge if tset == 'Human-Wild': xs = [nodes['Human'].index(sc), nodes['Wild'].index(sk)] if tset == 'Human-Domestic': xs = [nodes['Human'].index(sc), nodes['Domestic'].index(sk)] sc_subtype = G.node[sc]['subtype'] sk_subtype = G.node[sk]['subtype'] if sc_subtype == sk_subtype: color='green' else: color='blue' plt.plot(xs, ys, color=color, alpha=0.1, zorder=0) # Annotate in the W, H or D labels to the left of each axis. ax.annotate('W', xy=(-10*node_radius, maxnum-node_radius), xycoords='data', ha='right', va='center') ax.annotate('H', xy=(-10*node_radius, maxnum*0.5), xycoords='data', ha='right', va='center') ax.annotate('D', xy=(-10*node_radius, node_radius), xycoords='data', ha='right', va='center') # Add in two arrows to indicate the flow of sequences. ax.arrow(-10,maxnum*0.5+10, 0,maxnum*1/10, fc='k', ec='k', head_width=node_radius*2, head_length=node_radius*3, lw=node_radius/float(2)) ax.arrow(-10,maxnum*0.5-10, 0,-maxnum*1/10, fc='k', ec='k', head_width=node_radius*2, head_length=node_radius*3, lw=node_radius/float(2)) # Annotate the sub-figure letter. ax.annotate('b.', xy=(0,1), xycoords='figure fraction', ha='left', va='top') # Remove all spines from the axes object, and set x/y-ticks to be absent. for spine in ax.spines.keys(): ax.spines[spine].set_visible(False) ax.set_xticks([]) ax.set_yticks([]) plt.savefig('Reverse Zoonosis.pdf') ```
github_jupyter
# Module 6. Amazon SageMaker Deployment for EIA(Elastic Inference Accelerator) --- ***[주의] 본 모듈은 PyTorch EIA 1.3.1 버전에서 훈련을 수행한 모델만 배포가 가능합니다. 코드가 정상적으로 수행되지 않는다면, 프레임워크 버전을 동일 버전으로 맞춰 주시기 바랍니다.*** 본 모듈에서는 Elastic Inference Accelerator(EIA)를 사용하여 모델을 배포해 보겠습니다. ### Elastic Inference Accelerator 훈련 인스턴스와 달리 실시간 추론 인스턴스는 계속 상시로 띄우는 경우가 많기에, 딥러닝 어플리케이션에서 low latency를 위해 GPU 인스턴스를 사용하면 많은 비용이 발생합니다. Amazon Elastic Inference는 저렴하고 메모리가 작은 GPU 기반 가속기를 Amazon EC2, Amazon ECS, Amazon SageMaker에 연결할 수 있는 서비스로, Accelerator가 CPU 인스턴스에 프로비저닝되고 연결됩니다. EIA를 사용하면 GPU 인스턴스에 근접한 퍼포먼스를 보이면서 인스턴스 실행 비용을 최대 75%까지 절감할 수 있습니다. 모든 Amazon SageMaker 인스턴스 유형, EC2 인스턴스 유형 또는 Amazon ECS 작업을 지원하며, 대부분의 딥러닝 프레임워크를 지원하고 있습니다. 지원되는 프레임워크 버전은 AWS CLI로 확인할 수 있습니다. ```bash $ aws ecr list-images --repository-name tensorflow-inference-eia --registry-id 763104351884 $ aws ecr list-images --repository-name pytorch-inference-eia --registry-id 763104351884 $ aws ecr list-images --repository-name mxnet-inference-eia --registry-id 763104351884 ``` 참조: https://aws.amazon.com/ko/blogs/korea/amazon-elastic-inference-gpu-powered-deep-learning-inference-acceleration/ <br> ## 1. Inference script --- 아래 코드 셀은 `src` 디렉토리에 SageMaker 추론 스크립트인 `inference_eia.py`를 저장합니다.<br> Module 5의 코드와 대부분 동일하지만, `model_fn()` 메서드의 구현이 다른 점을 유의해 주세요. 
``` import os import time import sagemaker from sagemaker.pytorch.model import PyTorchModel role = sagemaker.get_execution_role() %%writefile ./src/inference_eia.py from __future__ import absolute_import import argparse import json import logging import os import sys import time import random from os.path import join import numpy as np import io import tarfile import boto3 from PIL import Image import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.optim import lr_scheduler import torch.optim as optim import torchvision import copy import torch.utils.data import torch.utils.data.distributed from torchvision import datasets, transforms, models from torch import topk logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) JSON_CONTENT_TYPE = 'application/json' def model_fn(model_dir): logger.info("==> model_dir : {}".format(model_dir)) traced_model = torch.jit.load(os.path.join(model_dir, 'model_eia.pth')) return traced_model # Deserialize the request body def input_fn(request_body, request_content_type='application/x-image'): print('An input_fn that loads a image tensor') print(request_content_type) if request_content_type == 'application/x-image': img = np.array(Image.open(io.BytesIO(request_body))) elif request_content_type == 'application/x-npy': img = np.frombuffer(request_body, dtype='uint8').reshape(137, 236) else: raise ValueError( 'Requested unsupported ContentType in content_type : ' + request_content_type) img = 255 - img img = img[:,:,np.newaxis] img = np.repeat(img, 3, axis=2) test_transforms = transforms.Compose([ transforms.ToTensor() ]) img_tensor = test_transforms(img) return img_tensor # Predicts on the deserialized object with the model from model_fn() def predict_fn(input_data, model): logger.info('Entering the predict_fn function') start_time = time.time() input_data = input_data.unsqueeze(0) device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu') model.to(device) model.eval() input_data = input_data.to(device) result = {} with torch.no_grad(): logits = model(input_data) pred_probs = F.softmax(logits, dim=1).data.squeeze() outputs = topk(pred_probs, 5) result['score'] = outputs[0].detach().cpu().numpy() result['class'] = outputs[1].detach().cpu().numpy() print("--- Elapsed time: %s secs ---" % (time.time() - start_time)) return result # Serialize the prediction result into the response content type def output_fn(pred_output, accept=JSON_CONTENT_TYPE): return json.dumps({'score': pred_output['score'].tolist(), 'class': pred_output['class'].tolist()}), accept ``` <br> ## 2. TorchScript Compile (Tracing) --- PyTorch 프레임워크에서 EI를 사용하기 위해서는 [TorchScript](https://pytorch.org/docs/1.3.1/jit.html)로 모델을 컴파일해야 하며, 2020년 8월 시점에서는 PyTorch 1.3.1을 지원하고 있습니다. TorchScript는 PyTorch 코드에서 직렬화 및 최적화 가능한 모델로 컴파일하며 Python 인터프리터의 글로벌 인터프리터 잠금 (GIL)과 무관하기 때문에 Python 외의 언어에서 로드 가능하고 최적화가 용이합니다. TorchScript로 변환하는 방법은 **tracing** 방식과 **scripting** 방식이 있으며, 본 핸즈온에서는 tracing 방식을 사용하겠습니다. <br> 참고로 tracing 방식은 샘플 입력 데이터를 모델에 입력 후 그 입력의 흐름(feedforward)을 기록하여 포착하는 메커니즘이며, scripting 방식은 모델 코드를 직접 분석해서 컴파일하는 방식입니다. ### Install dependencies ``` import sys !{sys.executable} -m pip install --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org !{sys.executable} -m pip install https://download.pytorch.org/whl/cpu/torchvision-0.4.2%2Bcpu-cp36-cp36m-linux_x86_64.whl !{sys.executable} -m pip install https://s3.amazonaws.com/amazonei-pytorch/torch_eia-1.3.1-cp36-cp36m-manylinux1_x86_64.whl !{sys.executable} -m pip install graphviz==0.13.2 !{sys.executable} -m pip install mxnet-model-server==1.0.8 !{sys.executable} -m pip install pillow==7.1.0 !{sys.executable} -m pip install sagemaker_containers !{sys.executable} -m pip install -U sagemaker ``` ### Compile Tracing 방식은 특정 input을 모델에 적용했을 때 수행되면서 operation이 저장하기 때문에, 이미지 사이즈와 동일한 크기의 랜덤 입력 데이터를 모델을 적용해야 합니다. 
``` import torch, os from torchvision import models model_dir = './model' print("==> model_dir : {}".format(model_dir)) model = models.resnet18(pretrained=True) last_hidden_units = model.fc.in_features model.fc = torch.nn.Linear(last_hidden_units, 186) model.load_state_dict(torch.load(os.path.join(model_dir, 'model.pth'))) import torch data = torch.rand(1,3,137,236) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model.to(device) input_data = data.to(device) with torch.jit.optimized_execution(True, {'target_device': 'eia:0'}): traced_model = torch.jit.trace(model, input_data) ``` 컴파일한 모델로 로컬 환경에서 추론을 수행해 보겠습니다. ``` from src.inference_eia import model_fn, input_fn, predict_fn, output_fn from PIL import Image import numpy as np import json file_path = 'test_imgs/test_0.jpg' with open(file_path, mode='rb') as file: img_byte = bytearray(file.read()) data = input_fn(img_byte) result = predict_fn(data, traced_model) print(result) ``` TorchScript 모델을 파일로 직렬화하여 저장합니다. 그런 다음, `tar.gz`로 압축하고 이 파일을 S3로 복사합니다. ``` torch.jit.save(traced_model, './model/model_eia.pth') tar_filename = 'model_eia.tar.gz' !cd model/ && tar -czvf $tar_filename model_eia.pth artifacts_dir = 's3://sagemaker-us-east-1-143656149352/pytorch-training-2020-08-16-04-47-36-618/output/' !aws s3 cp model/$tar_filename $artifacts_dir ``` <br> ## 3. SageMaker Hosted Endpoint Inference --- SageMaker가 관리하는 배포 클러스터를 프로비저닝하는 시간이 소요되기 때문에 추론 서비스를 시작하는 데에는 약 5~10분 정도 소요됩니다. 
``` import boto3 client = boto3.client('sagemaker') runtime_client = boto3.client('sagemaker-runtime') def get_model_path(sm_client, max_results=1, name_contains='pytorch'): training_job = sm_client.list_training_jobs(MaxResults=max_results, NameContains=name_contains, SortBy='CreationTime', SortOrder='Descending') training_job_name = training_job['TrainingJobSummaries'][0]['TrainingJobName'] training_job_description = sm_client.describe_training_job(TrainingJobName=training_job_name) model_path = training_job_description['ModelArtifacts']['S3ModelArtifacts'] return model_path #model_path = get_model_path(client, max_results=3) model_path = os.path.join(artifacts_dir, tar_filename) print(model_path) endpoint_name = "endpoint-bangali-classifier-eia-{}".format(int(time.time())) pytorch_model = PyTorchModel(model_data=model_path, role=role, entry_point='./src/inference_eia.py', framework_version='1.3.1', py_version='py3') predictor = pytorch_model.deploy(instance_type='ml.c5.large', initial_instance_count=1, accelerator_type='ml.eia2.large', endpoint_name=endpoint_name, wait=False) # client = boto3.client('sagemaker') # waiter = client.get_waiter('endpoint_in_service') # waiter.wait(EndpointName=endpoint_name) import boto3 client = boto3.client('sagemaker') runtime_client = boto3.client('sagemaker-runtime') endpoint_name = pytorch_model.endpoint_name client.describe_endpoint(EndpointName = endpoint_name) ``` 추론을 수행합니다. (`ContentType='application/x-image'`) ``` with open(file_path, mode='rb') as file: img_byte = bytearray(file.read()) response = runtime_client.invoke_endpoint( EndpointName=endpoint_name, ContentType='application/x-image', Accept='application/json', Body=img_byte ) print(response['Body'].read().decode()) %timeit runtime_client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Accept='application/json', Body=img_byte) ``` ### SageMaker Hosted Endpoint Clean-up 엔드포인트를 계속 사용하지 않는다면, 불필요한 과금을 피하기 위해 엔드포인트를 삭제해야 합니다. 
SageMaker SDK에서는 `delete_endpoint()` 메소드로 간단히 삭제할 수 있으며, UI에서도 쉽게 삭제할 수 있습니다. ``` def delete_endpoint(client, endpoint_name): response = client.describe_endpoint_config(EndpointConfigName=endpoint_name) model_name = response['ProductionVariants'][0]['ModelName'] client.delete_model(ModelName=model_name) client.delete_endpoint(EndpointName=endpoint_name) client.delete_endpoint_config(EndpointConfigName=endpoint_name) print(f'--- Deleted model: {model_name}') print(f'--- Deleted endpoint: {endpoint_name}') print(f'--- Deleted endpoint_config: {endpoint_name}') delete_endpoint(client, endpoint_name) ```
github_jupyter
# Agent prototype: Create ML training set from PPPDB ``` import json import os import pandas as pd import transfer_auth import search_client from globus_sdk import TransferData, GlobusError from gmeta_utils import gmeta_pop, format_gmeta s_client = search_client.SearchClient("https://search.api.globus.org/", "mdf") transfer_client = transfer_auth.login() dataset_name = "pppdb" local_ep = "0bc1cb98-d2af-11e6-9cb1-22000a1e3b52" dest_ep = "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec" dest_path = "/sample_data/"+dataset_name+"_train.csv" timeout = False timeout_intervals = 10 interval_time = 10 if not local_ep: pgr_res = transfer_client.endpoint_search(filter_scope="my-endpoints") ep_candidates = pgr_res.data if len(ep_candidates) < 1: #Nothing found raise GlobusError("Error: No local endpoints found") elif len(ep_candidates) == 1: #Exactly one candidate if ep_candidates[0]["gcp_connected"] == False: #Is GCP, is not on raise GlobusError("Error: Globus Connect is not running") else: #Is GCServer or GCP and connected local_ep = ep_candidates[0]["id"] else: # >1 found #Filter out disconnected GCP ep_connections = [candidate for candidate in ep_candidates if candidate["gcp_connected"] is not False] #Recheck list if len(ep_connections) < 1: #Nothing found raise GlobusError("Error: No local endpoints running") elif len(ep_connections) == 1: #Exactly one candidate if ep_connections[0]["gcp_connected"] == False: #Is GCP, is not on raise GlobusError("Error: Globus Connect is not active") else: #Is GCServer or GCP and connected local_ep = ep_connections[0]["id"] else: # >1 found #Prompt user print("Multiple endpoints found:") count = 0 for ep in ep_connections: count += 1 print(count, ": ", ep["display_name"], "\t", ep["id"]) print("\nPlease choose the endpoint on this machine") ep_num = 0 while ep_num == 0: usr_choice = input("Enter the number of the correct endpoint (-1 to cancel): ") try: ep_choice = int(usr_choice) if ep_choice == -1: #User wants to quit ep_num = -1 #Will break out 
of while to exit program elif ep_choice in range(1, count+1): #Valid selection ep_num = ep_choice #Break out of while, return valid ID else: #Invalid number print("Invalid selection") except: print("Invalid input") if ep_num == -1: print("Cancelling") sys.exit() local_ep = ep_connections[ep_num-1]["id"] ``` # Fetch and aggregate records into training set ``` count = 0 num_processed = 0 data_list = [] while True: query = { "q": ("mdf.source_name:"+dataset_name+" AND mdf.resource_type:record AND " "mdf.scroll_id:(>=" + str(count) + " AND <" + str(count + 10000) + ")"), "advanced": True, "limit": 10000 } raw_res = s_client.structured_search(query) search_res = gmeta_pop(raw_res) for res in search_res: data_dict = json.loads(res["mdf"]["raw"]) data_list.append(data_dict) num_ret = len(search_res) if num_ret: num_processed += num_ret count += 10000 else: break print(len(data_list), "/", num_processed, "|", len(data_list) - num_processed) df = pd.DataFrame(data_list) df.to_csv(os.path.join(os.getcwd(), "temp_train.csv")) ``` # Upload to NCSA endpoint ``` try: tdata = TransferData(transfer_client, local_ep, dest_ep, verify_checksum=True, notify_on_succeeded=False, notify_on_failed=False, notify_on_inactive=False) tdata.add_item(os.path.join(os.getcwd(), "temp_train.csv"), dest_path) res = transfer_client.submit_transfer(tdata) if res["code"] != "Accepted": raise GlobusError("Failed to transfer files: Transfer " + res["code"]) else: intervals = 0 while not transfer_client.task_wait(res["task_id"], timeout=interval_time, polling_interval=interval_time): for event in transfer_client.task_event_list(res["task_id"]): if event["is_error"]: transfer_client.cancel_task(res["task_id"]) raise GlobusError("Error: " + event["description"]) if timeout and intervals >= timeout_intervals: transfer_client.cancel_task(res["task_id"]) raise GlobusError("Transfer timed out.") intervals += 1 except Exception as e: raise finally: os.remove(os.path.join(os.getcwd(), "temp_train.csv")) ``` # 
Update dataset entry ``` query = { "q": "mdf.source_name:"+dataset_name+" AND mdf.resource_type:dataset", "advanced": True } raw_res = s_client.structured_search(query) search_res = gmeta_pop(raw_res) if len(search_res) != 1: raise ValueError("Incorrect number of results: " + str(len(search_res))) ingest = search_res[0] ingest["globus_subject"] = raw_res["gmeta"][0]["subject"] ingest["mdf"]["acl"] = ["public"] ingest["mdf"]["links"]["training_set"] = { "endpoint": dest_ep, "path": dest_path, "https": "https://data.materialsdatafacility.org" + dest_path } gmeta = format_gmeta([format_gmeta(ingest)]) s_client.ingest(gmeta) ``` # Check ingest ``` query = { "q": "mdf.source_name:"+dataset_name+" AND mdf.resource_type:dataset", "advanced": True } raw_res = s_client.structured_search(query) search_res = gmeta_pop(raw_res) search_res[0]["mdf"]["links"]["training_set"] ```
github_jupyter
# OLS regressions - baseline for Capstone analysis In this notebook, I perform OLS regressions using systemwide CaBi trips as the dependent variable. ``` from util_functions import * import numpy as np import pandas as pd import statsmodels.formula.api as smf import matplotlib.pyplot as plt import seaborn as sns; sns.set_style('darkgrid') import statsmodels.graphics.gofplots as gofplots %matplotlib inline set_env_path() conn, cur = aws_connect() query = """ SELECT *, CASE day_of_week WHEN 5 THEN 1 WHEN 6 THEN 1 ELSE 0 END AS weekend_dummy, from final_db""" df = pd.read_sql(query, con=conn) df.shape ``` ### First specification attempt - theory based A lot of the variation in daily CaBi rides can be explained by weather. I decided on the following specification based on trial and error and intuition. For our ML analysis, we will want to look into ways to perform feature selection algorithmically (I'm looking into this right now). That said, the variables I've chosen are fairly arbitrary and could probably be improved, but we shouldn't spend a huge amount of time on baseline stuff. I made sure to try to avoid multicollinearity, for example high and low temperature, population and date, and all of the CaBi data are all highly correlated. ``` def fitOLS(equation, cov='nonrobust'): ''' This function uses statsmodels.ols to estimate OLS regressions using R/patsy-style syntax. Args: equation (str): A patsy-style regression equation. e.g. 'cabi_trips ~ apparenttemperaturehigh + daylight_hours + rain' cov (str): A specific covariance matrix type. Default is 'nonrobust'. HC0-HC3 available for heteroskedasticity-robust standard errors. Returns: results: A RegressionResults object which summarizes the fit of a linear regression model. 
''' model = smf.ols('{}'.format(equation), df) results = model.fit(cov_type='{}'.format(cov), use_t=True) return results # Using the new weekend_dummy for demonstrative purposes results = fitOLS('cabi_trips ~ year + daylight_hours + ' 'apparenttemperaturehigh + rain + snow + ' 'nats_games + weekend_dummy', cov='HC0') results.summary() # Fit the model and print results # I wanted to use dc_pop instead of year (they're highly correlated) # But there are 0s in dc_pop that throw off the analysis results = fitOLS('cabi_trips ~ year + daylight_hours + ' 'apparenttemperaturehigh + rain + snow + ' 'nats_games + C(day_of_week)', cov='HC0') results.summary() ``` Our results look good. The R-squared tells us that about 74% of the variance in cabi_trips is explained by the variance in the explanatory variables. The low p-values indicate that the results we found are all statistically significant. Each of the coefficient estimates indicates the average change in daily CaBi trips associated with a one-unit increase in the explanatory variable, all else held equal. For dummy variables, this can be interpreted as an on-off switch, so on days when it snows, we should expect 1550 fewer rides. There are other things to worry about, though. Statistical programming packages often include diagnostic plots by default, but statsmodels doesn't. I explain three of these plots below. ``` '''Homoskedasticity is when the variance/scatter/spread of the residuals is constant for all values of the fitted values. It is an assumption under OLS. Heteroskedasticity is when the variance of the residuals changes as the fitted values change. If not addressed, it can lead to biased estimators. If our residuals were heteroskedastic, we would expect a scatter plot to form a funnel shape, and a regression line to have a slope. 
''' # Regplot fits a regression line to a scatterplot plt.title('Residuals vs Fitted Values') sns.regplot(results.fittedvalues, results.resid) plt.xlabel('Y-hat') plt.ylabel('Residuals') plt.show() ``` It doesn't look like there's heteroskedasticity, and the regression line is flat. However I think given our sample size and the significance of our variables, it couldn't hurt to specify heteroskedasticity-robust standard errors (the cov=HC0 argument in fitOLS). In practice I rarely see standard errors that aren't robust to either heteroskedasticity or clustering. (If we wanted to cluster, we would have to choose variables to cluster on, and I haven't looked into that for our data). ``` '''Normality of the residuals with mean 0 is another assumption under OLS. If residuals are nonnormal and not approximately centered at 0, the model is probably misspecified. The first chart is a kernel density estimation and the second is a Q-Q plot. Q-Q plots compare two datasets to see whether or not they come from the same distribution. If they do, the points should form a straight line. Here, we have a Normal Q-Q plot, where our residuals are being compared against a normal distribution. ''' # How are our residuals distributed? plt.title('Density Plot of Residuals') sns.kdeplot(results.resid) plt.show() # How close are our residuals to normal? fig = gofplots.qqplot(results.resid, line='s') plt.title("Normal Q-Q plot") plt.show() ``` The residuals appear to be approximately centered around 0. The third chart shows that our residuals are close to normal, but at the extreme ends of our distribution we get farther from a normal distribution. ### Second specification attempt - dockless? Next, I add dless_trips_all to the specification to see if there's any effect. ``` results = fitOLS('cabi_trips ~ year + daylight_hours +' 'apparenttemperaturehigh + rain + snow + ' 'nats_games + C(day_of_week) + dless_trips_all', cov='HC0') results.summary() ``` R squared is slightly higher. 
dless_trips_all is statistically significant, but its coefficient is small. An increase of 100 dockless trips is associated with 33 fewer CaBi trips. Its upper bound is also fairly close to 0. For the sake of brevity I don't include the diagnostic plots here because they don't change much after adding just one independent variable. ### Third specification attempt - transformations Next, I try taking the natural log of certain variables. When you include a logged variable, its interpretation changes to percentage change instead of unit change. I get into specifics in the cell after the regression results. Logging variables is also very good for dealing with outliers. OLS is sensitive to outliers - we saw this demonstrated in class when we removed one observation from the IQ ~ TVhours regression. Logging a variable with a long right tail will often make it approximately normal, which is better for OLS. ``` # I ran into errors trying to log cabi_trips because the log of 0 is undefined. # Ended up having to drop the four observations where cabi_trips==0 df = df[df.cabi_trips != 0] df.shape results = fitOLS('np.log(cabi_trips) ~ year + daylight_hours + ' 'np.log(apparenttemperaturehigh) + rain + snow + nats_games + C(day_of_week) + ' 'dless_trips_all', cov='HC0') results.summary() ``` Since we have some logged variables, the interpretation of the coefficients changes. Before, the interpretation of apparenttemperaturehigh's effect on cabi_rides was basically "Holding all else equal, how many more cabi rides should we see if the feels-like temperature is one degree (F) higher?" Now that both are logged, the coefficient of 0.8136 means "Holding all else equal, if feels-like temperature rises by 1%, we expect there to be a 0.81% increase in CaBi rides." I explain the interpretation of the dummy coefficients below. ``` # When you have a logged dependent variable, be careful with dummies # The effect is asymmetrical! 
# more: https://davegiles.blogspot.com/2011/03/dummies-for-dummies.html print('If rain switches from 0 to 1, the % impact on cabi_trips is ', 100*(np.exp(-0.2168) - 1)) print('If rain switches from 1 to 0, the % impact on cabi_trips is ', 100*(np.exp(0.2168) - 1)) print('If snow switches from 0 to 1, the % impact on cabi_trips is ', 100*(np.exp(-0.3684) - 1)) print('If snow switches from 1 to 0, the % impact on cabi_trips is ', 100*(np.exp(0.3684) - 1)) ``` All in all, this third specification isn't that appealing. nats_games is no longer significant, the R squared is lower, and the dummy variables don't make as much intuitive sense. Looking at the charts below you can see that things look worse than before. This particular specification is no good. ``` # Heteroskedasticity? plt.title('Residuals vs Fitted Values') sns.regplot(results.fittedvalues, results.resid) plt.xlabel('Y-hat') plt.ylabel('Residuals') plt.show() # How are our residuals distributed? plt.title('Density Plot of Residuals') sns.kdeplot(results.resid) plt.show() # How close are our residuals to normality? fig = gofplots.qqplot(results.resid, line='s') plt.title("Normal Q-Q plot") plt.show() ```
github_jupyter
# Table of Contents <p><div class="lev1"><a href="#Introduction"><span class="toc-item-num">1&nbsp;&nbsp;</span>Introduction</a></div><div class="lev2"><a href="#random-process"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>random process</a></div><div class="lev2"><a href="#probability"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>probability</a></div><div class="lev2"><a href="#law-of-large-numbers"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>law of large numbers</a></div><div class="lev1"><a href="#Disjoint-Events-+-General-Addition-Rule"><span class="toc-item-num">2&nbsp;&nbsp;</span>Disjoint Events + General Addition Rule</a></div><div class="lev2"><a href="#union-of-disjoint-events"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>union of disjoint events</a></div><div class="lev2"><a href="#complementary-events"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>complementary events</a></div><div class="lev1"><a href="#Independence"><span class="toc-item-num">3&nbsp;&nbsp;</span>Independence</a></div><div class="lev1"><a href="#Probability-Examples"><span class="toc-item-num">4&nbsp;&nbsp;</span>Probability Examples</a></div><div class="lev1"><a href="#Disjoint-vs-Independent"><span class="toc-item-num">5&nbsp;&nbsp;</span>Disjoint vs Independent</a></div> # Introduction ## random process <img src="images/Screen Shot 2016-05-30 at 5.16.48 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/07vL4/introduction) 1:00* <!--TEASER_END--> - Examples of random processes are coin tosses, die rolls, the shuffle mode on your music player, or the stock market. ## probability - A traditional definition of probability is a relative frequency. This is the **frequentist interpretation** of probability, where the probability of an outcome is the proportion of the times the outcome would occur if we observed the random process an infinite number of times. 
- An alternative interpretation is the **Bayesian interpretation**. A Bayesian interprets a probability as a subjective degree of belief. For the same event, two separate people could have different viewpoints and so assign different probabilities to it. This interpretation allows for prior information to be integrated into the inferential framework. Bayesian methods have been largely popularized by revolutionary advances in computational technology and methods during the last 20 years. <img src="images/Screen Shot 2016-05-30 at 5.20.11 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/07vL4/introduction) 2:40* <!--TEASER_END--> ## law of large numbers - The law of large numbers states that as more observations are collected, the proportion of occurrences with a particular outcome converges to the probability of that outcome. - This is why, as we roll a fair die many times, we expect the proportion of say, fives, to settle down to one-sixth. While earlier in the sequence, with too few rolls, we might not exactly get one in six fives. For example, if you roll a die say, six times, there's no guarantee that you're going to get at least one five in there. But if you roll the die say, 600 or 6,000 times, you would expect to see about one-sixth of the time to get a five. Similarly, why it would be more surprising to see three heads in 1,000 coin flips, than three heads in 10 or 100 coin flips. <img src="images/Screen Shot 2016-05-31 at 8.55.11 AM.png"> <img src="images/Screen Shot 2016-05-31 at 8.55.33 AM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/07vL4/introduction) 3:40* <!--TEASER_END--> - Say you toss a coin ten times, and it lands on heads each time. What do you think the chance is that another head will come up on the next toss? 0.5, less than 0.5, or more than 0.5? - The probability is still 50%. 
So, the probability of heads on the 11th toss is the same as the probability of heads on the 10th toss, or any previous tosses, which is 0.5. Each toss is independent, hence, the outcome of the next toss does not depend on the outcome of the previous toss. Another way of thinking about it is that the coin is memoryless. It doesn't remember what happened before and say to itself, well let me roll over on the other side next time. In other words, the coin is not due for a tail. <img src="images/Screen Shot 2016-05-31 at 9.04.55 AM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/07vL4/introduction) 4:46* <!--TEASER_END--> # Disjoint Events + General Addition Rule - **Disjoint events** by definition, cannot happen at the same time. A synonym for this term is **mutually exclusive**. - For example, the outcome of a single coin toss cannot be a head and a tail. A student cannot both fail and pass a class. A single card drawn from a deck cannot be an ace and a queen at the same time. - In a Venn diagram representation, where we represent each event by these circles, if events A and B are disjoint we end up with two circles that don't touch each other, which indicates that the probability of events A and B happening at the same time is 0. So the probability of A and B is 0. In other words, the events don't join — hence the term disjoint. - **Non-disjoint events** on the other hand can happen at the same time. In a Venn diagram representation of events A and B that are non-disjoint, we have two circles that overlap, in other words join, which indicates that the probability of events A and B happening at the same time is non-zero. So it's some number between 0 and 1. 
<img src="images/Screen Shot 2016-06-01 at 11.46.35 AM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qaYwc/disjoint-events-general-addition-rule) 1:20* <!--TEASER_END--> ## union of disjoint events <img src="images/Screen Shot 2016-06-01 at 11.54.37 AM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qaYwc/disjoint-events-general-addition-rule) 2:47* <!--TEASER_END--> <img src="images/Screen Shot 2016-06-01 at 2.43.33 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qaYwc/disjoint-events-general-addition-rule) 5:11* <!--TEASER_END--> ## complementary events <img src="images/Screen Shot 2016-06-01 at 2.50.11 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/qaYwc/disjoint-events-general-addition-rule) 9:28* <!--TEASER_END--> # Independence <img src="images/Screen Shot 2016-06-01 at 2.56.10 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/D1m0l/independence) 3:55* <!--TEASER_END--> <img src="images/Screen Shot 2016-06-01 at 8.11.24 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/D1m0l/independence) 5:24* <!--TEASER_END--> <img src="images/Screen Shot 2016-06-01 at 8.22.47 PM.png"> <img src="images/Screen Shot 2016-06-01 at 8.22.54 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/D1m0l/independence) 9:57* <!--TEASER_END--> # Probability Examples <img src="images/Screen Shot 2016-06-01 at 8.27.15 PM.png"> <img src="images/Screen Shot 2016-06-01 at 8.27.21 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/GqcO0/probability-examples) 2:54* <!--TEASER_END--> <img src="images/Screen Shot 2016-06-01 at 8.34.38 PM.png"> *Screenshot taken from 
[Coursera](https://www.coursera.org/learn/probability-intro/lecture/GqcO0/probability-examples) 8:38* <!--TEASER_END--> <img src="images/Screen Shot 2016-06-01 at 8.55.13 PM.png"> <img src="images/Screen Shot 2016-06-01 at 8.55.20 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/GqcO0/probability-examples) 9:01* <!--TEASER_END--> # Disjoint vs Independent <img src="images/Screen Shot 2016-06-01 at 9.00.07 PM.png"> *Screenshot taken from [Coursera](https://www.coursera.org/learn/probability-intro/lecture/PSXBC/spotlight-disjoint-vs-independent) 2:17* <!--TEASER_END-->
github_jupyter
# BPM ESTIMATION ON REAL VIDEO Explanation of the script for testing on real data in the same conditions as pyVHR ## Import libraries Beforehand, you have to install these Python libraries: * tensorflow (2.2.0) * matplotlib * scipy * numpy * opencv-python * Copy * pyVHR (0.0.1) ``` ## ## Importing libraries ## #Tensorflow/KERAS import tensorflow as tf from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.models import model_from_json from tensorflow.python.keras.utils import np_utils # Numpy / Matplotlib / OpenCV / Scipy / Copy / ConfigParser import numpy as np import scipy.io import scipy.stats as sp import matplotlib.pyplot as plt import cv2 from copy import copy import os import configparser #pyVHR from pyVHR.signals.video import Video from pyVHR.datasets.dataset import Dataset from pyVHR.datasets.dataset import datasetFactory ``` ## Load configuration (BPMEstimationOnRealVideo.cfg) ``` ## ## Loading configuration ## config = configparser.ConfigParser() config.read('./BPMEstimationOnRealVideo.cfg') if(int(config['ExeConfig']['useCPU']) == 1): #RUN ON CPU os.environ['CUDA_VISIBLE_DEVICES'] = '-1' ``` ## Load the video & pyVHR processing In the pyVHR framework, we work on a processed video. The processing consists of detecting and extracting an area of interest, in order to apply our rPPGs methods on relevant data. 
* videoFilename : path of the video * return : video processed ``` ## ## Loading the video & pyVHR processing ## def extraction_roi(video_filename): video = Video(video_filename) video.getCroppedFaces(detector='dlib', extractor='skvideo') video.setMask(typeROI='skin_adapt',skinThresh_adapt=0.30) return video ``` ## Load the model Load model & classes * model_path : path of the model * return : * model : the model trained to make predictions * freq_BPM : array containing the set of classes (representing each bpm) known by the model ``` ## ## Loading the model ## def loadmodel(model_path): model = model_from_json(open(f'{model_path}/model_conv3D.json').read()) model.load_weights(f'{model_path}/weights_conv3D.h5') model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # define the frequencies // output dimension (number of classes used during training) freq_bpm = np.linspace(55, 240, num=model.output_shape[1]-1) freq_bpm = np.append(freq_bpm, -1) # noise class return model, freq_bpm ``` ## Convert videoframes to a single channel array Select one channel for making prediction * video : whole video * model : the model trained to make predictions * startFrame : first frame to be read * return : frames normalized ``` ## ## Converting videoframes to a single channel array ## def convert_video_to_table(video,model, start_frame): imgs = np.zeros(shape=(model.input_shape[1], video.cropSize[0], video.cropSize[1], 1)) # channel extraction if (video.cropSize[2]<3): IMAGE_CHANNELS = 1 else: IMAGE_CHANNELS = video.cropSize[2] # load images (imgs contains the whole video) for j in range(0, model.input_shape[1]): if (IMAGE_CHANNELS==3): temp = video.faces[j + start_frame]/255 temp = temp[:,:,1] # only the G component is currently used else: temp = video.faces[j + start_frame] / 255 imgs[j] = np.expand_dims(temp, 2) return imgs ``` ## Get a prediction Using the model to make a prediction on a map tile * model : the model trained to make predictions * 
freq_bpm : array containing the set of classes (representing each bpm) known by the model * xtest : model input * return : A prediction ``` ## ## Using the model to make a prediction on a map tile ## def get_prediction(model,freq_bpm, xtest): idx = 0 # model.predict input_tensor = tf.convert_to_tensor(np.expand_dims(xtest, 0)) h = model(input_tensor) h = h.numpy() #Binary prediction res = np.zeros(shape=(76)) idx = get_idx(h[0]) res[idx] = 1 return res ``` ## Format Video Realization of a catagraphy of predictions on the video. This function formats the video in several sets of tests, in order to make multiple predictions. The sum of these predictions is returned. * video : whole video * model : the model trained to make predictions * imgs : Video sequence submitted to the prediction (including the subject's face) * freq_BPM : array containing the set of classes (representing each bpm) known by the model * stepX : horizontal step for mapping * stepY : vertical step for mapping ``` ## ## Formating Video ## def formating_data_test(video, model, imgs , freq_bpm, step_x, step_y): # output - sum of predictions predictions = np.zeros(shape=(len(freq_bpm))) # Displacement on the x axis iteration_x = 0 # Our position at n + 1 on the X axis axis_x = model.input_shape[3] # width of video width = video.cropSize[1] # height of video height = video.cropSize[0] # Browse the X axis while axis_x < width: # Displacement on the y axis axis_y = model.input_shape[2] # Our position at n + 1 on the Y axis iteration_y = 0 # Browse the Y axis while axis_y < height: # Start position x1 = iteration_x * step_x y1 = iteration_y * step_y # End position x2 = x1 + model.input_shape[3] y2 = y1 + model.input_shape[2] # Cutting face_copy = copy(imgs[0:model.input_shape[1],x1:x2,y1:y2,:]) # randomize pixel locations for j in range(model.input_shape[1]): temp = copy(face_copy[j,:,:,:]) np.random.shuffle(temp) face_copy[j] = temp # Checks the validity of cutting if(np.shape(face_copy)[1] == 
model.input_shape[3] and np.shape(face_copy)[2] == model.input_shape[2]): # prediction on the cut part xtest = face_copy - np.mean(face_copy) predictions = predictions + get_prediction(model,freq_bpm,xtest) # increments axis_y = y2 + model.input_shape[2] iteration_y = iteration_y +1 # increments axis_x = x2 + model.input_shape[3] iteration_x = iteration_x + 1 return predictions ``` ## Find the label associated with the prediction Applying the formula to transform the prediction result into a value representing the estimated heart rate (BPM) * prediction : array including the addition of all predictions * freq_bpm : array containing the set of classes (representing each bpm) known by the model * return : bpm value calculated ![bpm_formula](./img/bpm_formula.JPG) ``` ## ## Finding the label associated with the prediction ## def get_class(prediction, freq_bpm): nb_bins = 0 score = 0 for i in range(len(prediction)-1): nb_bins += prediction[i] score += freq_bpm[i] * prediction[i] bpm = score / nb_bins return bpm ``` ## Get the index of the maximum value of a prediction Use the model to make prediction * h : Array (here a prediction) * return : index of the maximum value of an array ``` ## ## Get the index of the maximum value of a prediction ## def get_idx(h): idx =0 maxi = -1 #find label associated for i in range(0, len(h)): if maxi < h[i]: idx = i maxi = h[i] return idx ``` ## Make a prediction Function to make prediction on veritable data * video : whole video * model : the model trained to make predictions * start_frame : index of first frame to process * x_step : horizontal step for mapping * y_step : vertical step for mapping * return : Estimated BPM ``` ## ## Make a prediction ## def make_prediction(video, model, freq_bpm, start_frame, x_step, y_step): #extract Green channel or Black & whrite channel frames_one_channel = convert_video_to_table(video,model, start_frame) prediction = formating_data_test(video, model, frames_one_channel, freq_bpm, x_step, y_step) # 
get bpm bpm = get_class(prediction, freq_bpm) return bpm ``` ## Get Ground truth * name_dataset: name of dataset used * video_gt_filename : path of the GT file * win_size_gt : window size of the GT * freq_bpm : array containing the set of classes (representing each bpm) known by the model * return : list of true BPM ``` ## ## Getting Ground truth ## def get_gt(name_dataset, video_gt_filename, win_size_gt, freq_bpm): dataset = datasetFactory(name_dataset) sig_gt = dataset.readSigfile(video_gt_filename) bpm_gt, times_gt = sig_gt.getBPM(win_size_gt) return bpm_gt ``` ## MAIN ``` ## ## MAIN ## videoFilename = str(config['ExeConfig']['videoFilename']) #video to be processed path modelFilename = str(config['ExeConfig']['modelFilename']) #model path # ROI EXTRACTION video = extraction_roi(videoFilename) # Load the model model, freq_bpm = loadmodel(modelFilename) frameRate = int(config['DataConfig']['frameRate']) nameDataset = str(config['ExeConfig']['nameDataset']) videoGTFilename = str(config['ExeConfig']['videoGTFilename']) winSizeGT = int(config['DataConfig']['winSizeGT']) #Data preparation x_step = int(config['DataConfig']['Xstep']) y_step = int(config['DataConfig']['Ystep']) NB_LAPSE = int(video.numFrames / frameRate) if(int(config['ExeConfig']['useNbLapse']) == 1): NB_LAPSE = int(config['DataConfig']['NbLapse']) GT_BPM = get_gt(nameDataset, videoGTFilename, winSizeGT, freq_bpm) # tables for display Tab_BPM_estimated = [] Tab_BPM_True = [] Tab_Lapse = [] # Second-by-second estimation for lapse in range(0 ,NB_LAPSE): startFrame = lapse * frameRate end = startFrame + model.input_shape[1] if(end > video.numFrames): break BPM_estimated = make_prediction(video, model, freq_bpm, startFrame, x_step, y_step) Tab_BPM_estimated.append(BPM_estimated) BPM_True = int(GT_BPM[lapse+int(winSizeGT/2)]) Tab_BPM_True.append(BPM_True) Tab_Lapse.append(lapse) # Graphic display fig, ax = plt.subplots(nrows=1, ncols=1) ax.plot(np.asarray(Tab_Lapse), np.asarray(Tab_BPM_estimated), 
marker='+', color='blue', label='BPM ESTIMATES') ax.plot(np.asarray(Tab_Lapse), np.asarray(Tab_BPM_True), marker='+', color='red', label='BPM GT') plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/luislauriano/Data_Science/blob/master/An%C3%A1lise_coronav%C3%ADrus.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # **Análise dos dados do Coronavírus** O novo coronavírus de 2019 (2019-nCoV) é um vírus (mais especificamente, um coronavírus) identificado como a causa de um surto de doença respiratória detectado pela primeira vez em Wuhan, China. No início, muitos dos pacientes do surto em Wuhan, na China, teriam algum vínculo com um grande mercado de frutos do mar e animais, sugerindo a disseminação de animais para pessoas. No entanto, um número crescente de pacientes supostamente não teve exposição ao mercado de animais, indicando a ocorrência de disseminação de pessoa para pessoa. # **Obtenção de dados** Este dataset foi retirado do [kaggle.com](https://) e disponibilizado pela Johns Hopkins University. O conjunto de dados possui informações diárias sobre o número de casos afetados, mortes e recuperação do novo coronavírus de 2019. Observe que esses são dados de séries temporais e, portanto, o número de casos em um determinado dia é o número acumulado. Disponibilizado pela Johns Hopkins University que criou um excelente painel usando os dados de casos afetados. **Os dados analisados vão do dia 22/01/2020 ao 09/03/2020** **Informações sobre os atributos** * Sno - número de série * ObservationDate - Data da observação em MM / DD / AAAA * Province/State - Província ou estado da observação (pode estar vazio quando estiver ausente) * Country/Region - país de observação * Last Update- Horário no UTC em que a linha é atualizada para a província ou país em questão. 
(Não padronizado e, portanto, limpe antes de usá-lo) * Confirmed - número acumulado de casos confirmados até essa data * Deaths - Número acumulado de óbitos até essa data * Recovered - Número acumulado de casos recuperados até essa data **Importar os dados** > Nesta etapa inicial importaremos o dataset mencionado anteriormente e as bibliotecas que iremos utilizar ``` #Importar bibliotecas necessárias import pandas as pd import matplotlib.pyplot as plt import seaborn as sns #Setar o estilo do seaborn sns.set(style="darkgrid") #Importar o dataset do covid-19 df = pd.read_csv('https://raw.githubusercontent.com/luislauriano/Data_Science/master/Datasets/covid_19_data.csv') #Importar um segundo dataset df2 = pd.read_csv('https://raw.githubusercontent.com/luislauriano/Data_Science/master/Datasets/covid_2103.csv') ``` # **Análise inicial dos dados** > O objetivo desta etapa é criar uma consciência situacional inicial e permitir um entendimento de como os dados estão estruturados. Para facilitar a nossa análise. Como de costume de toda e qualquer análise de dados, vamos verificar a cara do nosso dataset e já termos uma noção de com oque vamos trabalhar. ``` #Apresentar as primeiras linhas do dataset df.head() ``` Já que o nosso dataset possui apenas oito colunas, vamos aproveitar isso para renomear as colunas para português, facilitando o entendimento. ``` #Renomeando as colunas para português df.columns = ['Nº de série', 'data_da_observaçao', 'provincia_ou_estado', 'país', 'ultima_atualizaçao', 'casos_confirmados', 'casos_mortes', 'casos_recuperados' ] df2.columns = ['Nº de série', 'data_da_observaçao', 'provincia_ou_estado', 'país', 'ultima_atualizaçao', 'casos_confirmados', 'casos_mortes', 'casos_recuperados' ] #As primeiras linhas do dataset novamente df.head() ``` Vamos finalizar essa análise inicial verificando quais são as últimas linhas do nosso dataset. 
``` #As últimas linhas do dataset df.tail() ``` # **Preparação dos dados** > Abaixo será apresentado a análise dos dados do dataset, que embasará a conclusão do estudo. **Qual o tamanho e tipo dos atributos do dataset?** ``` #Tamanho do dataset print(f'Nº de Linhas: {df.shape[0]}') print(f'Nº de Colunas: {df.shape[1]}') #Tipo das variáveis df.dtypes.sort_values(ascending=False) ``` Se observarmos, foi verificado que nosso dataset possui 4513 linhas, 8 colunas e suas variáveis em sua maioria são do tipo objeto. > A qualidade de um dataset está diretamente relacionada à quantidade de valores ausentes, por isso, sempre devemos verificar a quantidade de valores ausentes presentes em nosso dataset. **Algum atributo possue valores ausentes?** ``` #Verificando valores ausentes do df (isnull().sum()) (df.isnull().sum()/df.shape[0]).sort_values(ascending=False) ``` Podemos observar que o atributo **provincia_ou_estado** tem aproximadamente 0.3% dos valores ausentes, enquantos os outros atibutos não apresentam valores ausentes. De acordo com a documentação dos dados, caso viessem nulos é por que a informação foi perdida. Se quiséssemos excluir os dados ausentes do atributo **provincia_ou_estado**, poderiamos utilizar o comando dropna e verificar os valores ausentes novamente. Como abaixo. ``` #Excluindo valores ausentes ('dropna') df.dropna(inplace=True) #Ver valores ausentes novamente ((df.isnull().sum() / df.shape[0])).sort_values(ascending=False) ``` Agora o atributo **provincia_ou_estado** nao obteria mais valores ausentes. **Qual o período em que os dados estão sendo analisados?** Por se tratar de um vírus que está crescendo todos os dias e tornando-se uma pandemia, precisamos saber qual o período que estamos analisando. 
``` #Data de inicio e fim print('Início: ', df.data_da_observaçao.min()) print('Fim: ', df.data_da_observaçao.max()) ``` Pra finalizar, abaixo temos o resumo do nosso dataset ``` #Resumo do nosso dataset df.info() ``` # **Análise exploratória dos casos do coronavirus** > Depois de uma preparação e tratamento dos dados do nosso dataset, podemos partir para os objetivos desse projeto e começar a explorar os dados. O objetivo dessa etapa e das que estão por vim é analisar e extrair informações sobre o coronavirus com base no nosso dataset. **Quais são os países afetados pelo novo vírus?** ``` # Mostrar os países afetados print(f'Os países afetados pelo novo vírus são: {df.país.unique()}') # Total de países afetados print(f'Ao total foram: {len(df.país.unique())} países afetados') ``` **Quais países foram mais afetados?** ``` #Os países que mais foram afetados df.groupby('país')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max().sort_values(by=['casos_confirmados','casos_mortes','casos_recuperados'], ascending = False)[:20] ``` É possivel identificar que a China lidera os países com o maior número de casos confirmados e de mortes, em seguida Hong Kong e em terceiro Japão. O fato da China apresentar um alto índice está diretamente relacionado ao país ter sido o berço do vírus. Se analisarmos o fato de que o ultimo em que foi registrado casos nesse dataset foi o dia **09/03**, podemos observar que também foi o dia em que o Canadá apresentou o seu segundo caso de morte pelo coronavírus de acordo com as noticias e já conseguimos observar isso na análise. 
Também é possivel identificar: * Até o momento os Estados Unidos, França e Coreia do Sul apresentam um maior número de casos de mortes do que casos recuperados * A China tem mais da metade de seus casos confirmados, recuperados > Vamos criar um gráfico de barra para visualizar melhor os casos do coronavírus por país ``` #Gráfico de barra dos casos do coronavirus por país casos = (df.groupby('país')['casos_confirmados','casos_mortes','casos_recuperados'].nunique().sort_values(by=['casos_confirmados','casos_mortes','casos_recuperados'], ascending = False))[:15] fig, ax = plt.subplots(figsize=(15,6)) casos.plot(kind='bar', ax=ax) ax.set_title('Nº de casos por país') ax.set_ylabel('Quantidade de casos') ax.set_xlabel('Nome do país') plt.tight_layout() ``` Nota-se que a China se destaca quando comparada aos outros países. Porém, este dataset vai do dia **22/01** até o dia **09/03** e é possivel observar que a China apresenta um maior número de casos recuperados do que casos de mortes. Talvez, se o dataset fosse coletado a um ou dois meses atrás, o gráfico fosse diferente e a China apresentasse um maior número de mortes do que casos recuperados. É possivel indentificar que os países Coreia do Sul, Estados Unidos, França, Itália e Iran, estão passando pelo pico do vírus e assim apresentam um número maior de casos de mortes do que casos recuperados. **Evolução do coronavirus** ``` #Evolução dos casos do coronavirus evoluçao = df.groupby('data_da_observaçao')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max() fig, ax = plt.subplots(figsize = (15,5)) evoluçao.plot(kind='line', marker='o', ax=ax) ax.set_title('Evolução de casos do coronavírus') ax.set_xlabel('Data') ax.set_ylabel('Quantidade de casos') plt.tight_layout() ``` É possivel observar que até o dia **11/02** aproximadamente um mês atrás (levando em consideração que estou escrevendo no dia 11/03) o número de casos de mortes e recuperados cresciam na mesma proporção. 
Porém, a partir do dia **21/02** o número de casos recuperados já apresentava uma boa diferença do número de casos de mortes e até o momento o número de casos de mortes tem mantido estabilizado, distante do número de casos recuperados. # **Estados Unidos, preocupante?** Como mostrado anteriormente, nos Estados Unidos tem crescido o número de casos confirmados pelo coronavírus, porém, o número de casos de mortes é maior do que casos recuperados em relação aos casos confirmados. Isto acontece devido ao pico de quando o vírus se choca com o país, oque deve acontecer em cada país afetado pelo vírus. Por isso, vamos agora visualizar melhor a evolução do vírus no país. ``` #Extraindo o país Estados Unidos da coluna País eua = df.loc[df['país'] == 'US'] eua.head() ``` Agora que já temos todos os valores dos Estados Unidos, vamos visualizar como foi a evolução do coronavirus no país no periodo do dia **22/01 ao 09/03**. ``` #Gráfico de linhas para visualizar a evolução de casos na China evolucao_EUA = eua.groupby('data_da_observaçao')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max() fig, ax = plt.subplots(figsize = (15,5)) evolucao_EUA.plot(kind='line', marker='o', ax=ax) ax.set_title('Evolução do coronavírus nos Estados Unidos') ax.set_xlabel('Data') ax.set_ylabel('Quantidade de casos') plt.tight_layout() ``` É possível identificar que a crescente de casos confirmados no páis se iniciou a partir do dia **21/02** e desde então tem aumentado. Também é possível identificar que o número de casos recuperados e de casos de mortes nunca apresentaram uma larga diferença, entretanto, a partir do dia **02/03** o número de casos de mortes tem crescido mais do que o número de casos recuperados. 
**Qual a porcentagem de casos de mortes e recuperados sobre os casos confirmados?** ``` print(f'Porcentagem de Casos de Mortes: {(eua["casos_mortes"].max() / eua["casos_confirmados"].max())*100}') print(f'Porcentagem de Casos Recuperados: {(eua["casos_recuperados"].max() / eua["casos_confirmados"].max())*100}') ``` # **Analisando os dados da China** ``` #Extraindo os valores de China da coluna País china = df.loc[df['país'] == 'Mainland China'] china.head() ``` > Agora vamos criar um gráfico para visualizar como foi a evolução do caso do coronavírus na China no período do dia **22/01 ao 09/03** ``` #Gráfico de linhas para visualizar a evolução de casos na China evolucao_china = china.groupby('data_da_observaçao')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max() fig, ax = plt.subplots(figsize = (15,5)) evolucao_china.plot(kind='line', marker='o', ax=ax) ax.set_title('Evolução de casos do coronavírus na China') ax.set_xlabel('Data') ax.set_ylabel('Quantidade de casos') plt.tight_layout() ``` Diferente dos Estados Unidos, podemos observar que a China já passou pelo seu pico do coronavírus e hoje já se encontra mais estabilizada, com o número de casos confirmados maior do que o número de mortes. É possível identificar que mesmo que o número de casos confirmados tenha tido um alto crescimento, aproximadamente a partir do dia **12/02**, o número de casos recuperados também cresceu proporcionalmente e no dia **21/02** o número de casos recuperados já apresentava uma boa diferença do número de casos de mortes. 
**Quais foram as provincias/estado da China que mais foram afetados?** ``` #Províncias ou estados da China que mais foram afetados china.groupby('provincia_ou_estado')['casos_confirmados','casos_mortes','casos_recuperados'].max().sort_values(by='casos_confirmados', ascending = False)[:10] ``` > Vamos criar um grafico para visualizar melhor essas províncias ou estados que foram afetados ``` #Gráfico de barra para as provincias ou estados da China que mais foram afetados provincia_estado = china.groupby('provincia_ou_estado')['casos_confirmados','casos_mortes','casos_recuperados'].nunique().sort_values(by='casos_confirmados', ascending = False)[:10] fig, ax = plt.subplots(figsize=(15,6)) provincia_estado.plot(kind='bar', ax=ax) ax.set_title('Províncias ou estados da China que mais foram afetados') ax.set_ylabel('Quantidade de casos') ax.set_xlabel('Nome da provincia ou estado') plt.tight_layout() ``` Nota-se que em sua maioria a China conta com estados ou províncias com maior número de casos recuperados do que de casos confirmados e com uma larga diferença dos casos de mortes. **Qual a porcentagem de mortos sobre os casos confirmados?** ``` print(f'Porcentagem de Casos de Mortes: {(china["casos_mortes"].max() / china["casos_confirmados"].max())*100}') print(f'Porcentagem de Casos Recuperados: {(china["casos_recuperados"].max() / china["casos_confirmados"].max())*100}') ``` # **Analisando os dados do Brasil** Como já dito anteriormente, o registro de dados no nosso dataset se inicia no dia **22/01** e se encerra no dia que eu o coletei, **09/03**. Porém, a partir dos dias **09/03** e **08/03** o Brasil começou a iniciar oque poderíamos chamar de pico do coronavírus, aumentando o seu número de casos confirmados. Então, resolvi coletar um novo dataset no dia **13/03** e compararmos o crescimento do vírus em nosso país. 
``` #Extraindo valores do Brasil da coluna País do df 22/01 ao 09/03 brasil = df.loc[df['país'] == 'Brazil'] brasil.head() #Extraindo valores do Brasil da coluna País do df2 22/01 ao 11/03 brasil2 = df2.loc[df2['país'] == 'Brazil'] brasil2.head() ``` **Brasil 22/01 ao 09/03** ``` #Data de inicio e fim print('Início: ', df.data_da_observaçao.min()) print('Fim: ', df.data_da_observaçao.max()) ``` Nota-se que este dataset se inicia no dia 22/01 e termina no dia **09/03**. Vamos visualizar como foi o crescimento do vírus no país até essa data, abaixo. ``` evolucao_brasil = brasil.groupby('data_da_observaçao')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max() fig, ax = plt.subplots(figsize = (15,5)) evolucao_brasil.plot(kind='line', marker='o', ax=ax) ax.set_title('Evolução de casos do coronavírus no Brasil') ax.set_xlabel('Data') ax.set_ylabel('Quantidade de casos') plt.tight_layout() ``` **Brasil 22/01 ao 11/03** ``` #Data de inicio e fim print('Início: ', df2.data_da_observaçao.min()) print('Fim: ', df2.data_da_observaçao.max()) ``` Fica claro que o nosso segundo dataset, o **df2**, termina os seus registros de dados no dia **11/03**. Agora vamos comparar como foi o crescimento do vírus no páis do dia **08/03** ao dia **11/03**, abaixo. ``` evolucao_brasil2 = brasil2.groupby('data_da_observaçao')['casos_confirmados', 'casos_mortes', 'casos_recuperados'].max() fig, ax = plt.subplots(figsize = (15,5)) evolucao_brasil2.plot(kind='line', marker='o', ax=ax) ax.set_title('Evolução de casos do coronavírus no Brasil') ax.set_xlabel('Data') ax.set_ylabel('Quantidade de casos') plt.tight_layout() ``` Se analisarmos e compararmos com calma os dois gráficos que mostram a evolução do coronavírus no Brasil. Podemos identificar que do dia **08/03** ao dia **11/03** o país teve um crescimento aproximado de mais de 20 casos confirmados. 
# **Conclusão** O que se pode concluir é que cada país vai passar por um pico do vírus e a forma como as autoridades do país vão responder, está claramente ligada ao número de casos confirmados, casos de mortes e casos recuperados que o país irá apresentar. Na análise feita foi identificado que a China já apresenta um número maior de casos recuperados do que de casos de mortes. Países como Estados Unidos, França, Coreia do Sul e Irã estão entrando no pico do vírus, apresentando um número maior de casos de mortes do que de casos recuperados. Países como Brasil e Canadá começaram a ter seus primeiros grandes choques com o vírus. Lembre-se de que essa análise foi baseada em uma base de dados disponibilizada pela Johns Hopkins University.
github_jupyter
Copyright (c) 2020-2021. All rights reserved. Licensed under the MIT License. # Troubleshooting HPO for fine-tuning pre-trained language models ## 1. Introduction In this notebook, we demonstrate a procedure for troubleshooting HPO failure in fine-tuning pre-trained language models (introduced in the following paper): *[An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://arxiv.org/abs/2106.09204). Xueqing Liu, Chi Wang. To appear in ACL-IJCNLP 2021* Notes: *In this notebook, we only run each experiment 1 time for simplicity, which is different from the paper (3 times). To reproduce the paper's result, please run 3 repetitions and take the average scores. *Running this notebook takes about one hour. FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` and `nlp` options: ```bash pip install flaml[nlp] ``` Our paper was developed under transformers version 3.4.0. We uninstall and reinstall transformers==3.4.0: ``` !pip install flaml[nlp] !pip install transformers==3.4.0 from flaml.nlp import AutoTransformers ``` ## 2. Initial Experimental Study ### Load dataset Load the dataset using AutoTransformer.prepare_data. In this notebook, we use the Microsoft Research Paraphrasing Corpus (MRPC) dataset and the Electra model as an example: ``` autohf = AutoTransformers() preparedata_setting = { "dataset_subdataset_name": "glue:mrpc", "pretrained_model_size": "google/electra-base-discriminator:base", "data_root_path": "data/", "max_seq_length": 128, } autohf.prepare_data(**preparedata_setting) ``` ### Running grid search First, we run grid search using Electra. By specifying `algo_mode="grid"`, AutoTransformers will run the grid search algorithm. 
By specifying `space_mode="grid"`, AutoTransformers will use the default grid search configuration recommended by the Electra paper: ``` import transformers autohf_settings = { "resources_per_trial": {"gpu": 1, "cpu": 1}, "num_samples": 1, "time_budget": 100000, # unlimited time budget "ckpt_per_epoch": 5, "fp16": True, "algo_mode": "grid", # set the search algorithm to grid search "space_mode": "grid", # set the search space to the recommended grid space "transformers_verbose": transformers.logging.ERROR } validation_metric, analysis = autohf.fit(**autohf_settings) ``` Get the time for running grid search: ``` GST = autohf.last_run_duration print("grid search for {} took {} seconds".format(autohf.jobid_config.get_jobid_full_data_name(), GST)) ``` After the HPO run finishes, generate the predictions and save it as a .zip file to be submitted to the glue website. Here we will need the library AzureUtils which is for storing the output information (e.g., analysis log, .zip file) locally and uploading the output to an azure blob container (e.g., if multiple jobs are executed in a cluster). If the azure key and container information is not specified, the output information will only be saved locally. ``` predictions, test_metric = autohf.predict() from flaml.nlp import AzureUtils print(autohf.jobid_config) azure_utils = AzureUtils(root_log_path="logs_test/", autohf=autohf) azure_utils.write_autohf_output(valid_metric=validation_metric, predictions=predictions, duration=GST) print(validation_metric) ``` The validation F1/accuracy we got was 92.4/89.5. After the above steps, you will find a .zip file for the predictions under data/result/. Submit the .zip file to the glue website. The test F1/accuracy we got was 90.4/86.7. As an example, we only run the experiment one time, but in general, we should run the experiment multiple repetitions and report the averaged validation and test accuracy. 
### Running Random Search Next, we run random search with the same time budget as grid search: ``` def tune_hpo(time_budget, this_hpo_space): autohf_settings = { "resources_per_trial": {"gpu": 1, "cpu": 1}, "num_samples": -1, "time_budget": time_budget, "ckpt_per_epoch": 5, "fp16": True, "algo_mode": "hpo", # set the search algorithm mode to hpo "algo_name": "rs", "space_mode": "cus", # customized search space (this_hpo_space) "hpo_space": this_hpo_space, "transformers_verbose": transformers.logging.ERROR } validation_metric, analysis = autohf.fit(**autohf_settings) predictions, test_metric = autohf.predict() azure_utils = AzureUtils(root_log_path="logs_test/", autohf=autohf) azure_utils.write_autohf_output(valid_metric=validation_metric, predictions=predictions, duration=GST) print(validation_metric) hpo_space_full = { "learning_rate": {"l": 3e-5, "u": 1.5e-4, "space": "log"}, "warmup_ratio": {"l": 0, "u": 0.2, "space": "linear"}, "num_train_epochs": [3], "per_device_train_batch_size": [16, 32, 64], "weight_decay": {"l": 0.0, "u": 0.3, "space": "linear"}, "attention_probs_dropout_prob": {"l": 0, "u": 0.2, "space": "linear"}, "hidden_dropout_prob": {"l": 0, "u": 0.2, "space": "linear"}, } tune_hpo(GST, hpo_space_full) ``` The validation F1/accuracy we got was 93.5/90.9. Similarly, we can submit the .zip file to the glue website. The test F1/accuracy we got was 81.6/70.2. ## 3. Troubleshooting HPO Failures Since the validation accuracy is larger than grid search while the test accuracy is smaller, HPO is overfitting. 
We reduce the search space: ``` hpo_space_fixwr = { "learning_rate": {"l": 3e-5, "u": 1.5e-4, "space": "log"}, "warmup_ratio": [0.1], "num_train_epochs": [3], "per_device_train_batch_size": [16, 32, 64], "weight_decay": {"l": 0.0, "u": 0.3, "space": "linear"}, "attention_probs_dropout_prob": {"l": 0, "u": 0.2, "space": "linear"}, "hidden_dropout_prob": {"l": 0, "u": 0.2, "space": "linear"}, } tune_hpo(GST, hpo_space_fixwr) ``` The validation F1/accuracy we got was 92.6/89.7, the test F1/accuracy was 85.9/78.7, therefore overfitting still exists and we further reduce the space: ``` hpo_space_min = { "learning_rate": {"l": 3e-5, "u": 1.5e-4, "space": "log"}, "warmup_ratio": [0.1], "num_train_epochs": [3], "per_device_train_batch_size": [16, 32, 64], "weight_decay": [0.0], "attention_probs_dropout_prob": [0.1], "hidden_dropout_prob": [0.1], } tune_hpo(GST, hpo_space_min) ``` The validation F1/accuracy we got was 90.4/86.7, test F1/accuracy was 83.0/73.0. Since the validation accuracy is below grid search, we increase the budget to 4 * GST: ``` hpo_space_min = { "learning_rate": {"l": 3e-5, "u": 1.5e-4, "space": "log"}, "warmup_ratio": [0.1], "num_train_epochs": [3], "per_device_train_batch_size": [32], "weight_decay": [0.0], "attention_probs_dropout_prob": [0.1], "hidden_dropout_prob": [0.1], } tune_hpo(4 * GST, hpo_space_min) ``` The validation F1/accuracy we got was 93.5/91.1, where the accuracy outperforms grid search. The test F1/accuracy was 90.1/86.1. As a result, random search with 4*GST and the minimum space overfits. We stop the troubleshooting process because the search space cannot be further reduced.
github_jupyter
# Cell Problem: In GDS format - each cell must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers. - two cells stored in the GDS file cannot have the same name. Ideally they will be references to the same Cell. See `References tutorial`. That way we only have to store that cell in memory once and all the references are just pointers to that cell. - GDS cells info: - `changed` used to create the cell - `default` in function signature - `full` full settings - name - function_name - module - child: (if any) - simulation, testing, data analysis, derived properties (for example path length of the bend) ... Solution: The decorator `@gf.cell` addresses all these issues: 1. Gives the cell a unique name depending on the parameters that you pass to it. 2. Creates a cache of cells where we use the cell name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same cell twice. 
For creating new Components you need to create them inside a function, and to make sure that the component gets a good name you just need to add the `@cell` decorator Lets see how it works ``` import gdsfactory as gf @gf.cell def wg(length=10, width=1): print("BUILDING waveguide") c = gf.Component() c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0)) c.add_port(name="o1", midpoint=[0, width / 2], width=width, orientation=180) c.add_port(name="o2", midpoint=[length, width / 2], width=width, orientation=0) return c ``` See how the cells get the name from the parameters that you pass them ``` c = wg() print(c) # The second time you will get this cell from the cache c = wg() print(c) # If you call the cell with different parameters, the cell will get a different name c = wg(width=0.5) print(c) c.info.changed c.info.full c.info.default c.pprint() ``` thanks to `gf.cell` you can also add any metadata `info` relevant to the cell ``` c = wg(length=3, info=dict(polarization="te", wavelength=1.55)) c.pprint() print(c.info.wavelength) ``` ## Metadata Together with the GDS files that you send to the foundry you can also store some metadata in YAML for each cell containing all the settings that we used to build the GDS. the metadata will consists of all the parameters that were passed to the component function as well as derived properties - info: includes all component metadata - derived properties - external metadata (test_protocol, docs, ...) - simulation_settings - function_name - name: for the component - name_long: for the component - full: full list of settings - changed: changed settings - default: includes the default signature of the component - ports: port name, width, orientation How can you have add two different references to a cell with the same parameters? 
``` import gdsfactory as gf c = gf.Component("problem") R1 = gf.components.rectangle( size=(4, 2), layer=(0, 0) ) # Creates a rectangle (same Unique ID uid) R2 = gf.components.rectangle(size=(4, 2), layer=(0, 0)) # Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache) r1r = c << R1 # Add the first rectangle to c r2r = c << R2 # Add the second rectangle to c r2r.move((4, 2)) c print(R1 == R2) print(R1) print(R2) # lets do it cleaner with references import gdsfactory as gf c = gf.Component("solution") R = gf.components.rectangle(size=(4, 2), layer=(0, 0)) r1 = c << R # Add the first rectangle reference to c r2 = c << R # Add the second rectangle reference to c r2.rotate(45) c import gdsfactory as gf c = gf.components.straight() c.show() c.plot() ``` We can even show ports of all references with `component.show(show_subports=True)` ``` c = gf.components.mzi_phase_shifter(length_x=50) c ``` ## Cache To avoid that 2 exact cells are not references of the same cell the `cell` decorator has a cache where if a component has already been built it will return the component from the cache ``` @gf.cell def wg(length=10, width=1): c = gf.Component() c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0)) print("BUILDING waveguide") return c gf.clear_cache() wg1 = wg() # cell builds a straight print(wg1) wg2 = wg() # cell returns the same straight as before without having to run the function print(wg2) # notice that they have the same uuid (unique identifier) wg2.plot() from gdsfactory.cell import print_cache ``` Lets say that you change the code of the straight function in a jupyter notebook like this one. 
(I mostly use Vim/VsCode/Pycharm for creating new cells in python) ``` print_cache() wg3 = wg() wg4 = wg(length=11) print_cache() gf.clear_cache() ``` To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel ``` print_cache() # cache is now empty ``` ## Validate argument types By default, also `@cell` validates arguments based on their type annotations. To make sure you pass the correct arguments to the cell it runs a validator that checks the type annotations for the function. For example this will be correct ```python import gdsfactory as gf @gf.cell def straigth_waveguide(length:float): return gf.components.straight(length=length) component = straigth_waveguide(length=3) ``` While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float ```python import gdsfactory as gf @gf.cell def straigth_waveguide(length:float): return gf.components.straight(length=length) component = straigth_waveguide(length='long') ``` by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/#argument-types) ``` @gf.cell def straigth_waveguide(length: float): print(type(length)) return gf.components.straight(length=length) # It will also convert an `int` to a `float` straigth_waveguide(3) ```
github_jupyter
# GeoEnrichment GeoEnrichment provides the ability to get facts about a location or area. Using GeoEnrichment, you can get information about the people and places in a specific area or within a certain distance or drive time from a location. It enables you to query and use information from a large collection of data sets including population, income, housing, consumer behavior, and the natural environment. This module enables you to answer questions about locations that you can't answer with maps alone. For example: What kind of people live here? What do people like to do in this area? What are their habits and lifestyles? The `enrich()` method to can be used retrieve demographics and other relevant characteristics associated with the area surrounding the requested places. You can also use the `arcgis.geoenrichment` module to obtain additional geographic context (for example, the ZIP Code of a location) and geographic boundaries (for example, the geometry for a drive-time service area). Site analysis is a popular application of this type of data enrichment. For example, GeoEnrichment can be leveraged to study the population that would be affected by the development of a new community center within their neighborhood. With the `enrich()` method, the proposed site can be submitted, and the demographics and other relevant characteristics associated with the area around the site will be returned. 
The following topics are covered in this guide - [Getting started](#Getting-Started) - [GeoEnrichment coverage](#GeoEnrichment-coverage) - [Filtering countries by properties](#Filtering-countries-by-properties) - [Discovering information for a country](#Discovering-information-for-a-country) - [Data collections and analysis variables](#Data-collections-and-analysis-variables) - [Available reports](#Available-reports) - [Finding named statistical areas](#Finding-named-statistical-areas) - [Searching for named areas within a country](#Searching-for-named-areas-within-a-country) - [Filtering named areas by geography level](#Filtering-named-areas-by-geography-level) - [Working with study areas](#Working-with-study-Areas) - [Accepted forms of study areas](#Accepted-forms-of-study-areas) - [Creating Reports](#Creating-Reports) - [Enriching study areas](#Enriching-study-areas) - [Enriching an existing feature layer](#Enriching-an-existing-feature-layer) - [Example: Enriching a named statistical area](#Example:-Enriching-a-named-statistical-area) - [Example: Enrich all counties in a state](#Example:-Enrich-all-counties-in-a-state) - [Example: Using comparison levels](#Example:-Using-comparison-levels) - [Example: Enriching a street address](#Example:-Enriching-street-address) - [Example: Buffering locations using non-overlapping disks](#Example:-Buffering-locations-using-non-overlapping-disks) - [Example: Using drive times as study areas](#Example:-Using-drive-times-as-study-areas) - [Visualize results on a map](#Visualize-results-on-a-map) - [Saving GeoEnrichment results](#Saving-GeoEnrichment-Results) ## Getting Started A user must be logged on to a GIS in order to use GeoEnrichment. The geoenrichment functionality is available in the `arcgis.geoenrichment` module. 
``` from arcgis.gis import GIS from arcgis.geoenrichment import * gis = GIS('https://www.arcgis.com', 'arcgis_python', 'P@ssword123') ``` ## GeoEnrichment coverage The `get_countries()` method can be used to query the countries for which there is GeoEnrichment data. You get back a list of `Country` object. You can query the properties of each `Country` object. ``` countries = get_countries() print("Number of countries for which GeoEnrichment data is available: " + str(len(countries))) #print a few countries for a sample countries[0:10] ``` ### Filtering countries by properties You can filter out the `Country` objects based on one or more of their properties. For instance, the snippet below gets the countries in Australian continent. ``` [c.properties.name for c in countries if c.properties.continent == 'Australia'] ``` ## Discovering information for a country The `Country` class can be used to discover the data collections, sub-geographies and available reports for a country. When working with a particular country, you will find it convenient to get a reference to it using the `Country.get()` method. This method accepts the country name or its [2 letter abbreviation](http://www.nationsonline.org/oneworld/country_code_list.htm) or [ISO3 code](https://unstats.un.org/unsd/tradekb/knowledgebase/country-code) and returns an instance of that country. ``` usa = Country.get('US') type(usa) ``` Commonly used properties for the country are accessible using `Country.properties`. ``` usa.properties.name usa.properties.datasets ``` ### Data collections and analysis variables GeoEnrichment uses the concept of a data collection to define the data attributes returned by the enrichment service. A data collection is a preassembled list of attributes that will be used to enrich the input features. Collection attributes can describe various types of information, such as demographic characteristics and geographic context of the locations or areas submitted as input features. 
The `data_collections` property of a `Country` object lists its available data collections and analysis variables under each data collection as a [Pandas](https://pandas.pydata.org/) dataframe. ``` df = usa.data_collections # print a few rows of the DataFrame df.head() # call the shape property to get the total number of rows and columns df.shape ``` Each data collection and analysis variable has a unique ID. When calling the `enrich()` method (explained later in this guide) these analysis variables can be passed in the `data_collections` and `analysis_variables` parameters. You can filter the `data_collections` and query the collection's `analysis_variables` using Pandas expressions. ``` # get all the unique data collections available df.index.unique() ``` The snippet below shows how you can query the `Age` data collection and get all the unique `analysisVariable`s under that collection ``` df.loc['Age']['analysisVariable'].unique() # view a sample of the `Age` data collection df.loc['Age'].head() ``` ### Available reports GeoEnrichment also enables you to create many types of high quality reports for a variety of use cases describing the input area. The `reports` property of a `Country` object lists its available reports as a Pandas DataFrame. The report `id` you see below is used as an input in the `create_report()` method to create reports. ``` # print a sample of the reports available for USA usa.reports.head(10) # total number of reports available usa.reports.shape ``` ## Finding named statistical areas Each country has several named statistical areas in a hierarchy of geography levels (such as states, counties, zip codes, etc). The `subgeographies` property of a country can be used to discover these standard geographic/statistical areas within that country. This information is available through a hierarchy of dynamic properties (like states, counties, tracts, zip-codes,...). 
Each such dynamic property reflects the geographical levels within that country, with subgeographies grouped logically under the higher levels of geographies. The properties are dictionaries containing the names of the standard geographic places and their values are instances of `NamedArea` class. The `NamedArea` objects can be used as **study areas** in the `enrich()` method. <blockquote><b>Note:</b> Setting the `IPCompleter.greedy=True` configuration option in Jupyter notebook enables you to dynamically discover the various levels of subgeographies using intellisense, as in the example below:</blockquote> ``` %config IPCompleter.greedy=True usa.subgeographies.states['California'].counties['San_Bernardino_County'] usa.subgeographies.states['California'].counties['San_Bernardino_County'].tracts['060710001.03'] usa.subgeographies.states['California'].zip5['92373'] ``` The named areas can also be drawn on a map, as they include a `geometry` property. ``` m = gis.map('Redlands, CA', zoomlevel=11) m ``` ![img: Draw a subgeography](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_ge_subgeography_01.png) ``` m.draw(usa.subgeographies.states['California'].zip5['92373'].geometry) ``` The subgeography levels and their hierarchies are different for each country, and can also be different for the different datasets (which can be set using the `Country.dataset` property). ``` india = Country.get('India') ``` Print the available datasets. ``` india.properties.datasets ``` View the current dataset of the country. ``` india.dataset ``` Inspect the various subgeographies. ``` india.subgeographies.states['Uttar_Pradesh'].districts['Baghpat'].subdistricts['Baraut'] ``` Change the dataset used for the country. Notice the difference in available subgeographies. 
``` india.dataset = 'IND_Indicus' india.dataset india.subgeographies.state['Uttar_Pradesh'].district['Baghpat'] ``` ### Searching for named areas within a country ``` riversides_in_usa = usa.search('Riverside') print("number of riversides in the US: " + str(len(riversides_in_usa))) # list a few of them riversides_in_usa[:10] ``` For instance, you can make a map of all the riversides in the US ``` usamap = gis.map('United States', zoomlevel=4) usamap ``` ![img: riversides in the us](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_ge_named_areas_02.png) ``` for riverside in riversides_in_usa: usamap.draw(riverside.geometry) ``` #### Filtering named areas by geography level ``` [level['id'] for level in usa.levels] usa.search(query='Riverside', layers=['US.Counties']) ``` ## Working with study Areas GeoEnrichment uses the concept of a study area to define the location of the point or area that you want to enrich with additional information or create reports about. ### Accepted forms of study areas - **Street address locations** - Locations can be passed as strings of input street addresses, points of interest or place names. + **Example:** `"380 New York St, Redlands, CA"` - **Multiple field input addresses** - Locations described as multiple field input addresses, using dictionaries. + **Example:** {"Address" : "380 New York Street", "City" : "Redlands", "Region" : "CA", "Postal" : 92373} - **Point and line geometries** - Point and line locations, using `arcgis.geometry` instances. + **Example Point Location: ** `arcgis.geometry.Geometry({"x":-122.435,"y":37.785})` - **Buffered study areas** - `BufferStudyArea` instances to change the ring buffer size or create drive-time service areas around points specified using one of the above methods. BufferStudyArea allows you to buffer point and street address study areas. 
They can be created using the following parameters: * area: the point geometry or street address (string) study area to be buffered * radii: list of distances by which to buffer the study area, eg. [1, 2, 3] * units: distance unit, eg. Miles, Kilometers, Minutes (when using drive times/travel_mode) * overlap: boolean, uses overlapping rings/network service areas when True, or non-overlapping disks when False * travel_mode: None or string, one of the supported travel modes when using network service areas + **Example Buffered Location: ** `pt = arcgis.geometry.Geometry({"x":-122.435,"y":37.785}) buffered_area = BufferStudyArea(area=pt, radii=[1,2,3], units="Miles", overlap=False)` - **Network service areas** - `BufferStudyArea` also allows you to define drive time service areas around points as well as other advanced service areas such as walking and trucking. + **Example: ** `pt = arcgis.geometry.Geometry({"x":-122.435,"y":37.785}) buffered_area = BufferStudyArea(area=pt, radii=[1,2,3], units="Minutes", travel_mode="Driving")` - **Named statistical areas** - In all previous examples of different study area types, locations were defined as either points or polygons. Study area locations can also be passed as one or many named statistical areas. This form of study area lets you define an area as a standard geographic statistical feature, such as a census or postal area, for example, to obtain enrichment information for a U.S. state, county, or ZIP Code or a Canadian province or postal code. When the NamedArea instances should be combined together (union), a list of such NamedArea instances should constitute a study area in the list of requested study areas. + **Example:** `usa.subgeographies.states['California'].zip5['92373']` - **Polygon geometries** - Locations can given as polygon geometries. 
+ **Example Polygon geometry: ** `arcgis.geometry.Geometry({"rings":[[[-117.185412,34.063170],[-122.81,37.81],[-117.200570,34.057196],[-117.185412,34.063170]]],"spatialReference":{"wkid":4326}})` ## Creating Reports The `create_report` method allows you to create many types of high quality reports for a variety of use cases describing the input area. If a point is used as a study area, the service will create a `1` mile ring buffer around the point to collect and append enrichment data. Optionally, you can create a buffer ring or drive time service area around points of interest to generate PDF or Excel reports containing relevant information for the area on demographics, consumer spending, tapestry market, etc. To find the list of reports available, refer to the section [Available reports](#Available-reports) earlier in this page. ``` report = create_report(study_areas=["380 New York Street, Redlands, CA"], report="tapestry_profileNEW", export_format="PDF", out_folder=r"c:\xc", out_name="esri_tapestry_profile.pdf") report ``` You can find a sample PDF report [here](http://help.arcgis.com/en/geoenrichment/rest-report-samples/ex2.pdf) ## Enriching study areas The `enrich()` method returns a `SpatialDataFrame` that can either be used for mapping via the `gis.content.import_data()` method or for further analysis. If `return_geometry` is set to `False`, a Pandas DataFrame is returned instead. ### Enriching an existing feature layer The examples to be shown below return you a new `SpatialDataFrame` that you can save as a new `Item` in your GIS, use for analysis or visualize on a map. However, if you would like to enrich an existing `FeatureLayer` by adding new data, then use [the `enrich_layer()` method](http://esri.github.io/arcgis-python-api/apidoc/html/arcgis.features.enrich_data.html#enrich-layer) from the `features` module. 
### Example: Enriching a named statistical area Enriching zip code 92373 in California using the 'Age' data collection: ``` redlands = usa.subgeographies.states['California'].zip5['92373'] enrich(study_areas=[redlands], data_collections=['Age'] ) ``` ### Example: Enrich all counties in a state ``` ca_counties = usa.subgeographies.states['California'].counties counties_df = enrich(study_areas=ca_counties, data_collections=['Age']) counties_df.head(10) m = gis.map('California') m lyr = gis.content.import_data(df=counties_df, title="CA county population") m.add_layer(lyr.layers[0], {'renderer': 'ClassedColorRenderer', 'field_name':'FEM0'}) ``` ### Example: Using comparison levels The information for the study areas can also be compared with standard geography areas in other levels. In the example below, the 92373 zip code intersects with both Riverside and San Bernardino counties in California, and hence both these counties as well as the state of California are returned along with the results for the named zip code, as the comparison levels includes both States and Counties. ``` enrich(study_areas=[redlands], data_collections=['Age'], comparison_levels=['US.Counties', 'US.States']) ``` ### Example: Enriching street address The example below uses a street address as a study area instead. ``` enrich(study_areas=["380 New York St Redlands CA 92373"], data_collections=['Age'], comparison_levels=['US.Counties', 'US.States']) ``` ### Example: Buffering locations using non overlapping disks The example below creates non-overlapping disks of radii 1, 3 and 5 Miles respectively from a street address and enriches these using the 'Age' data collection. 
``` buffered = BufferStudyArea(area='380 New York St Redlands CA 92373', radii=[1,3,5], units='Miles', overlap=False) enrich(study_areas=[buffered], data_collections=['Age']) ``` ### Example: Using drive times as study areas The example below creates 5 and 10 minute drive times from a street address and enriches these using the 'Age' data collection. ``` buffered = BufferStudyArea(area='380 New York St Redlands CA 92373', radii=[5, 10], units='Minutes', travel_mode='Driving') drive_time_df = enrich(study_areas=[buffered], data_collections=['Age']) drive_time_df ``` ### Visualize results on a map The returned spatial dataframe can be visualized on a map as shown below: ``` redlands_map = gis.map('Redlands, CA') redlands_map.basemap = 'dark-gray-vector' redlands_map ``` ![](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_ge_drive_times_03.png) ``` redlands_map.draw(drive_time_df.to_featureset()) ``` ## Saving GeoEnrichment Results The results can be saved back to a GIS as a feature layer #### Example: Save Data to a Feature Layer ``` gis.content.import_data(df=drive_time_df, title="Age statistics within 5,10 minutes of drive time from Esri") ```
github_jupyter
# Riskfolio-Lib Tutorial: <br>__[Financionerioncios](https://financioneroncios.wordpress.com)__ <br>__[Orenji](https://www.orenj-i.net)__ <br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__ <br>__[Dany Cajas](https://www.linkedin.com/in/dany-cajas/)__ <a href='https://ko-fi.com/B0B833SXD' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> ## Tutorial 5: Multi Assets Algorithmic Trading Backtesting with Backtrader For this tutorial we need matplotlib=3.2.2 because backtrader only works with this version of matplotlib. However, this version of matplotlib gives us an error in the plot_pie function. ## 1. Downloading the data: ``` import pandas as pd import datetime import yfinance as yf import backtrader as bt import numpy as np import warnings warnings.filterwarnings("ignore") # Date range start = '2010-01-01' end = '2020-12-31' # Tickers of assets assets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'APA', 'MMC', 'JPM', 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'TMO', 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI', 'T', 'BA','SPY'] assets.sort() # Downloading data prices = yf.download(assets, start=start, end=end) display(prices.head()) prices = prices.dropna() ############################################################ # Showing data ############################################################ display(prices.head()) ``` ## 2. 
Building the Backtest Function with Backtrader ### 2.1 Defining Backtest Function ``` ############################################################ # Defining the backtest function ############################################################ def backtest(datas, strategy, start, end, plot=False, **kwargs): cerebro = bt.Cerebro() # Here we add transaction costs and other broker costs cerebro.broker.setcash(1000000.0) cerebro.broker.setcommission(commission=0.005) # Commission 0.5% cerebro.broker.set_slippage_perc(0.005, # Slippage 0.5% slip_open=True, slip_limit=True, slip_match=True, slip_out=False) for data in datas: cerebro.adddata(data) # Here we add the indicators that we are going to store cerebro.addanalyzer(bt.analyzers.SharpeRatio, riskfreerate=0.0) cerebro.addanalyzer(bt.analyzers.Returns) cerebro.addanalyzer(bt.analyzers.DrawDown) cerebro.addstrategy(strategy, **kwargs) cerebro.addobserver(bt.observers.Value) cerebro.addobserver(bt.observers.DrawDown) results = cerebro.run(stdstats=False) if plot: cerebro.plot(iplot=False, start=start, end=end) return (results[0].analyzers.drawdown.get_analysis()['max']['drawdown'], results[0].analyzers.returns.get_analysis()['rnorm100'], results[0].analyzers.sharperatio.get_analysis()['sharperatio']) ``` ### 2.2 Building Data Feeds for Backtesting ``` ############################################################ # Create objects that contain the prices of assets ############################################################ # Creating Assets bt.feeds assets_prices = [] for i in assets: if i != 'SPY': prices_ = prices.drop(columns='Adj Close').loc[:, (slice(None), i)].dropna() prices_.columns = ['Close', 'High', 'Low', 'Open', 'Volume'] assets_prices.append(bt.feeds.PandasData(dataname=prices_, plot=False)) # Creating Benchmark bt.feeds prices_ = prices.drop(columns='Adj Close').loc[:, (slice(None), 'SPY')].dropna() prices_.columns = ['Close', 'High', 'Low', 'Open', 'Volume'] benchmark = bt.feeds.PandasData(dataname=prices_, 
plot=False) display(prices_.head()) ``` ## 3. Building Strategies with Backtrader ### 3.1 Buy and Hold SPY ``` ############################################################ # Building the Buy and Hold strategy ############################################################ class BuyAndHold(bt.Strategy): def __init__(self): self.counter = 0 def next(self): if self.counter >= 1004: if self.getposition(self.data).size == 0: self.order_target_percent(self.data, target=0.99) self.counter += 1 ``` If you have an error related to 'warnings' modules when you try to plot, you must modify the 'locator.py' file from backtrader library following the instructions in this __[link](https://community.backtrader.com/topic/981/importerror-cannot-import-name-min_per_hour-when-trying-to-plot/8)__. ``` ############################################################ # Run the backtest for the selected period ############################################################ %matplotlib inline import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (10, 6) # (w, h) plt.plot() # We need to do this to avoid errors in inline plot start = 1004 end = prices.shape[0] - 1 dd, cagr, sharpe = backtest([benchmark], BuyAndHold, start=start, end=end, plot=True) ############################################################ # Show Buy and Hold Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") ``` ### 3.2 Rebalancing Quarterly using Riskfolio-Lib ``` ############################################################ # Calculate assets returns ############################################################ pd.options.display.float_format = '{:.4%}'.format data = prices.loc[:, ('Adj Close', slice(None))] data.columns = assets data = data.drop(columns=['SPY']).dropna() returns = data.pct_change().dropna() display(returns.head()) ############################################################ # Selecting 
Dates for Rebalancing ############################################################ # Selecting last day of month of available data index = returns.groupby([returns.index.year, returns.index.month]).tail(1).index index_2 = returns.index # Quarterly Dates index = [x for x in index if float(x.month) % 3.0 == 0 ] # Dates where the strategy will be backtested index_ = [index_2.get_loc(x) for x in index if index_2.get_loc(x) > 1000] ############################################################ # Building Constraints ############################################################ asset_classes = {'Assets': ['JCI','TGT','CMCSA','CPB','MO','APA','MMC','JPM', 'ZION','PSA','BAX','BMY','LUV','PCAR','TXT','TMO', 'DE','MSFT','HPQ','SEE','VZ','CNP','NI','T','BA'], 'Industry': ['Consumer Discretionary','Consumer Discretionary', 'Consumer Discretionary', 'Consumer Staples', 'Consumer Staples','Energy','Financials', 'Financials','Financials','Financials', 'Health Care','Health Care','Industrials','Industrials', 'Industrials','Health care','Industrials', 'Information Technology','Information Technology', 'Materials','Telecommunications Services','Utilities', 'Utilities','Telecommunications Services','Financials']} asset_classes = pd.DataFrame(asset_classes) asset_classes = asset_classes.sort_values(by=['Assets']) constraints = {'Disabled': [False, False, False], 'Type': ['All Assets', 'All Classes', 'All Classes'], 'Set': ['', 'Industry', 'Industry'], 'Position': ['', '', ''], 'Sign': ['<=', '<=', '>='], 'Weight': [0.10, 0.20, 0.03], 'Type Relative': ['', '', ''], 'Relative Set': ['', '', ''], 'Relative': ['', '', ''], 'Factor': ['', '', '']} constraints = pd.DataFrame(constraints) display(constraints) ############################################################ # Building constraint matrixes for Riskfolio Lib ############################################################ import riskfolio as rp A, B = rp.assets_constraints(constraints, asset_classes) %%time 
############################################################ # Building a loop that estimate optimal portfolios on # rebalancing dates ############################################################ models = {} # rms = ['MV', 'MAD', 'MSV', 'FLPM', 'SLPM', # 'CVaR', 'WR', 'MDD', 'ADD', 'CDaR'] rms = ['MV', 'CVaR', 'WR', 'CDaR'] for j in rms: weights = pd.DataFrame([]) for i in index_: Y = returns.iloc[i-1000:i,:] # taking last 4 years (250 trading days per year) # Building the portfolio object port = rp.Portfolio(returns=Y) # Add portfolio constraints port.ainequality = A port.binequality = B # Calculating optimum portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Estimate optimal portfolio: port.solvers = ['MOSEK'] port.alpha = 0.05 model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = j # Risk measure used, this time will be variance obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) if w is None: w = weights.tail(1).T weights = pd.concat([weights, w.T], axis = 0) models[j] = weights.copy() models[j].index = index_ ############################################################ # Building the Asset Allocation Class ############################################################ class AssetAllocation(bt.Strategy): def __init__(self): j = 0 for i in assets: setattr(self, i, self.datas[j]) j += 1 self.counter = 0 def next(self): if self.counter in weights.index.tolist(): for i in assets: w = weights.loc[self.counter, i] 
self.order_target_percent(getattr(self, i), target=w) self.counter += 1 ############################################################ # Backtesting Mean Variance Strategy ############################################################ assets = returns.columns.tolist() weights = models['MV'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) ############################################################ # Show Mean Variance Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") ############################################################ # Plotting the composition of the last MV portfolio ############################################################ w = pd.DataFrame(models['MV'].iloc[-1,:]) # We need matplotlib >= 3.3.0 to use this function #ax = rp.plot_pie(w=w, title='Sharpe Mean Variance', others=0.05, nrow=25, cmap = "tab20", # height=6, width=10, ax=None) w.plot.pie(subplots=True, figsize=(8, 8)) ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) ############################################################ # Backtesting Mean CVaR Strategy ############################################################ assets = returns.columns.tolist() weights = models['CVaR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) ############################################################ # Show CVaR Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") ############################################################ # 
Plotting the composition of the last CVaR portfolio ############################################################ w = pd.DataFrame(models['CVaR'].iloc[-1,:]) # We need matplotlib >= 3.3.0 to use this function #ax = rp.plot_pie(w=w, title='Sharpe Mean CVaR', others=0.05, nrow=25, cmap = "tab20", # height=6, width=10, ax=None) w.plot.pie(subplots=True, figsize=(8, 8)) ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) ############################################################ # Backtesting Mean Worst Realization Strategy ############################################################ assets = returns.columns.tolist() weights = models['WR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) ############################################################ # Show Worst Realization Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") ############################################################ # Plotting the composition of the last WR portfolio ############################################################ w = pd.DataFrame(models['WR'].iloc[-1,:]) # We need matplotlib >= 3.3.0 to use this function #ax = rp.plot_pie(w=w, title='Sharpe Mean WR', others=0.05, nrow=25, cmap = "tab20", # height=6, width=10, ax=None) w.plot.pie(subplots=True, figsize=(8, 8)) ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] 
display(w_classes) ############################################################ # Backtesting Mean CDaR Strategy ############################################################ assets = returns.columns.tolist() weights = models['CDaR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) ############################################################ # Show CDaR Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") ############################################################ # Plotting the composition of the last CDaR portfolio ############################################################ w = pd.DataFrame(models['CDaR'].iloc[-1,:]) # We need matplotlib >= 3.3.0 to use this function #ax = rp.plot_pie(w=w, title='Sharpe Mean CDaR', others=0.05, nrow=25, cmap = "tab20", # height=6, width=10, ax=None) w.plot.pie(subplots=True, figsize=(8, 8)) ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) ``` ## 4. Conclusion In this example, the best strategy in terms of performance is __WR__ . The ranking of strategies based on performance follows: 1. WR (7.03%): Worst Scenario or Minimax Model. 1. SPY (6.53%): Buy and Hold SPY. 1. CVaR (5.73%): Conditional Value at Risk. 1. MV (5.68%): Mean Variance. 1. CDaR (4.60%): Conditional Drawdown at Risk. On the other hand, the best strategy in terms of Sharpe Ratio is __MV__ . The ranking of strategies based on Sharpe Ratio follows: 1. MV (0.701): Mean Variance. 1. CVaR (0.694): Conditional Value at Risk. 1. WR (0.681): Worst Scenario or Minimax Model. 1. SPY (0.679): Buy and Hold SPY. 1. 
CDaR (0.622): Conditional Drawdown at Risk.
github_jupyter
**Source of the materials**: Biopython cookbook (adapted) # Quick Start This section is designed to get you started quickly with Biopython, and to give a general overview of what is available and how to use it. All of the examples in this section assume that you have some general working knowledge of Python, and that you have successfully installed Biopython on your system. If you think you need to brush up on your Python, the main Python web site provides quite a bit of free documentation to get started with (http://www.python.org/doc/). Since much biological work on the computer involves connecting with databases on the internet, some of the examples will also require a working internet connection in order to run. Now that that is all out of the way, let’s get into what we can do with Biopython. ## General overview of what Biopython provides As mentioned in the introduction, Biopython is a set of libraries to provide the ability to deal with “things” of interest to biologists working on the computer. In general this means that you will need to have at least some programming experience (in Python, of course!) or at least an interest in learning to program. Biopython’s job is to make your job easier as a programmer by supplying reusable libraries so that you can focus on answering your specific question of interest, instead of focusing on the internals of parsing a particular file format (of course, if you want to help by writing a parser that doesn’t exist and contributing it to Biopython, please go ahead!). So Biopython’s job is to make you happy! One thing to note about Biopython is that it often provides multiple ways of “doing the same thing.” Things have improved in recent releases, but this can still be frustrating as in Python there should ideally be one right way to do something. However, this can also be a real benefit because it gives you lots of flexibility and control over the libraries. 
The tutorial helps to show you the common or easy ways to do things so that you can just make things work. To learn more about the alternative possibilities, look in the Cookbook, the Advanced section, the built in “docstrings” (via the Python help command, or the API documentation) or ultimately the code itself. ## Working with sequences We’ll start with a quick introduction to the Biopython mechanisms for dealing with sequences, the Seq object, which we’ll discuss in more detail later. Most of the time when we think about sequences we have in mind a string of letters like ‘AGTACACTGGT’. You can create such a Seq object with this sequence as follows: ``` from Bio.Seq import Seq my_seq = Seq("AGTACACTGGT") my_seq print(my_seq) my_seq.alphabet ``` What we have here is a sequence object with a _generic_ alphabet - reflecting the fact we have _not_ specified if this is a DNA or protein sequence (okay, a protein with a lot of Alanines, Glycines, Cysteines and Threonines!). In addition to having an alphabet, the Seq object differs from the Python string in the methods it supports. You can’t do this with a plain string: ``` my_seq.complement() my_seq.reverse_complement() ``` The next most important class is the **SeqRecord** or Sequence Record. This holds a sequence (as a **Seq** object) with additional annotation including an identifier, name and description. The **Bio.SeqIO** module for reading and writing sequence file formats works with SeqRecord objects, which will be introduced below and covered in more detail later. This covers the basic features and uses of the Biopython sequence class. Now that you’ve got some idea of what it is like to interact with the Biopython libraries, it’s time to delve into the fun, fun world of dealing with biological file formats! ## A usage example Before we jump right into parsers and everything else to do with Biopython, let’s set up an example to motivate everything we do and make life more interesting. 
After all, if there wasn’t any biology in this tutorial, why would you want to read it? Since I love plants, I think we’re just going to have to have a plant based example (sorry to all the fans of other organisms out there!). Having just completed a recent trip to our local greenhouse, we’ve suddenly developed an incredible obsession with Lady Slipper Orchids. Of course, orchids are not only beautiful to look at, they are also extremely interesting for people studying evolution and systematics. So let’s suppose we’re thinking about writing a funding proposal to do a molecular study of Lady Slipper evolution, and would like to see what kind of research has already been done and how we can add to that. After a little bit of reading up we discover that the Lady Slipper Orchids are in the Orchidaceae family and the Cypripedioideae sub-family and are made up of 5 genera: _Cypripedium_, _Paphiopedilum_, _Phragmipedium_, _Selenipedium_ and _Mexipedium_. That gives us enough to get started delving for more information. So, let’s look at how the Biopython tools can help us. We’ll start with sequence parsing, but the orchids will be back later on as well - for example we’ll search PubMed for papers about orchids and extract sequence data from GenBank, extract data from Swiss-Prot from certain orchid proteins and work with ClustalW multiple sequence alignments of orchid proteins. ## Parsing sequence file formats A large part of much bioinformatics work involves dealing with the many types of file formats designed to hold biological data. These files are loaded with interesting biological data, and a special challenge is parsing these files into a format so that you can manipulate them with some kind of programming language. However the task of parsing these files can be frustrated by the fact that the formats can change quite regularly, and that formats may contain small subtleties which can break even the most well designed parsers. 
We are now going to briefly introduce the **Bio.SeqIO** module – you can find out later. We’ll start with an online search for our friends, the lady slipper orchids. To keep this introduction simple, we’re just using the NCBI website by hand. Let’s just take a look through the nucleotide databases at NCBI, using an Entrez online search (http://www.ncbi.nlm.nih.gov:80/entrez/query.fcgi?db=Nucleotide) for everything mentioning the text _Cypripedioideae_ (this is the subfamily of lady slipper orchids). When this tutorial was originally written, this search gave us only 94 hits, which we saved as a FASTA formatted text file and as a GenBank formatted text file (files ls_orchid.fasta and ls_orchid.gbk, also included with the Biopython source code under docs/tutorial/examples/). If you run the search today, you’ll get hundreds of results! When following the tutorial, if you want to see the same list of genes, just download the two files above or copy them from docs/examples/ in the Biopython source code. Below we will look at how to do a search like this from within Python. ### Simple FASTA parsing example If you open the lady slipper orchids FASTA file <a href="../data/ls_orchid.fasta">ls_orchid.fasta</a> in your favourite text editor, you’ll see that the file starts like this: > \>gi|2765658|emb|Z78533.1|CIZ78533 C.irapeanum 5.8S rRNA gene and ITS1 and ITS2 DNA CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGATGAGACCGTGGAATAAACGATCGAGTG AATCCGGAGGACCGGTGTACTCAGCTCACCGGGGGCATTGCTCCCGTGGTGACCCTGATTTGTTGTTGGG ... It contains 94 records, each has a line starting with “>” (greater-than symbol) followed by the sequence on one or more lines. 
Now try this in Python (printing the first 5 records): ``` from Bio import SeqIO for seq_record in list(SeqIO.parse("data/ls_orchid.fasta", "fasta"))[:5]: print(seq_record.id) print(repr(seq_record.seq)) print(len(seq_record)) ``` Notice that the FASTA format does not specify the alphabet, so **Bio.SeqIO** has defaulted to the rather generic **SingleLetterAlphabet()** rather than something DNA specific. ### Simple GenBank parsing example Now let’s load the GenBank file <a href="../data/ls_orchid.gbk">ls_orchid.gbk</a> instead - notice that the code to do this is almost identical to the snippet used above for the FASTA file - the only difference is we change the filename and the format string: ``` from Bio import SeqIO for seq_record in list(SeqIO.parse("data/ls_orchid.gbk", "genbank"))[:5]: print(seq_record.id) print(repr(seq_record.seq)) print(len(seq_record)) ``` This time **Bio.SeqIO** has been able to choose a sensible alphabet, IUPAC Ambiguous DNA. You’ll also notice that a shorter string has been used as the **seq_record.id** in this case. ### I love parsing – please don’t stop talking about it! Biopython has a lot of parsers, and each has its own little special niches based on the sequence format it is parsing and all of that. While the most popular file formats have parsers integrated into *Bio.SeqIO* and/or *Bio.AlignIO*, for some of the rarer and unloved file formats there is either no parser at all, or an old parser which has not been linked in yet. Please also check the wiki pages http://biopython.org/wiki/SeqIO and http://biopython.org/wiki/AlignIO for the latest information, or ask on the mailing list. The wiki pages should include an up to date list of supported file types, and some additional examples. The next place to look for information about specific parsers and how to do cool things with them is in the Cookbook. 
If you don’t find the information you are looking for, please consider helping out your poor overworked documenters and submitting a cookbook entry about it! (once you figure out how to do it, that is!) ## Connecting with biological databases One of the very common things that you need to do in bioinformatics is extract information from biological databases. It can be quite tedious to access these databases manually, especially if you have a lot of repetitive work to do. Biopython attempts to save you time and energy by making some on-line databases available from Python scripts. Currently, Biopython has code to extract information from the following databases: - Entrez (and PubMed) from the NCBI - ExPASy - SCOP The code in these modules basically makes it easy to write Python code that interacts with the CGI scripts on these pages, so that you can get results in an easy to deal with format. In some cases, the results can be tightly integrated with the Biopython parsers to make it even easier to extract information. ## What to do next Now that you’ve made it this far, you hopefully have a good understanding of the basics of Biopython and are ready to start using it for doing useful work. The best thing to do now is finish reading this tutorial, and then if you want start snooping around in the source code, and looking at the automatically generated documentation. Once you get a picture of what you want to do, and what libraries in Biopython will do it, you should take a peek at the Cookbook (Chapter 18), which may have example code to do something similar to what you want to do. If you know what you want to do, but can’t figure out how to do it, please feel free to post questions to the main Biopython list (see http://biopython.org/wiki/Mailing_lists). This will not only help us answer your question, it will also allow us to improve the documentation so it can help the next person do what you want to do. Enjoy the code!
github_jupyter
# Active Learning (VST ATLAS) ``` # remove after testing %load_ext autoreload %autoreload 2 import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import StratifiedShuffleSplit from sklearn.ensemble import BaggingClassifier from sklearn.svm import SVC from mclearn.active import run_active_learning_expt from mclearn.heuristics import (random_h, margin_h, entropy_h, qbb_margin_h, qbb_kl_h, pool_entropy_h, pool_variance_h) from mclearn.tools import results_exist, load_results from mclearn.preprocessing import balanced_train_test_split from mclearn.viz import plot_learning_curve_df, plot_final_accuracy, order_learning_curves %matplotlib inline sns.set_style('ticks') sns.set_palette('Dark2', 8) ``` ## Setting Up ``` running_expt = False # set to False if only want to load in results fig_dir = '../thesis/figures/' target_col = 'class' vstatlas_features = ['rmagC', 'umg', 'gmr', 'rmi', 'imz', 'rmw1', 'w1m2'] xrange = np.arange(50, 301) colors = {'Random': '#e6ab02', 'Entropy': '#e7298a', 'Margin': '#d95f02', 'QBB Margin': '#1b9e77', 'QBB KL': '#66a61e', 'Pool Variance': '#7570b3', 'Pool Entropy': '#a6761d', 'Thompson': '#666666'} linestyles = {'Random': '-', 'Entropy': '-', 'Margin': '-', 'QBB Margin': '-', 'QBB KL': '-', 'Pool Variance': '-', 'Pool Entropy': '-', 'Thompson': '-'} labels = ['Random', 'Entropy', 'Margin', 'QBB Margin', 'QBB KL', 'Pool Variance', 'Pool Entropy', 'Thompson'] upper_l = 0.97918682 upper_r = 0.99091814 if running_expt: vstatlas = pd.read_hdf('../data/vstatlas.h5', 'vstatlas') transformer = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True) X = np.array(vstatlas[vstatlas_features]) X_poly = transformer.fit_transform(X) y = np.array(vstatlas[target_col]) ``` ## Balanced: Logistic Regression ``` if running_expt: logistic = 
LogisticRegression(multi_class='ovr', penalty='l1', C=100, random_state=2, class_weight='auto') logistic_committee = BaggingClassifier(logistic, n_estimators=11, n_jobs=-1, max_samples=300) X_pool, _, y_pool, _ = balanced_train_test_split(X_poly, y, train_size=2360, test_size=0, random_state=11) kfold = StratifiedShuffleSplit(y_pool, n_iter=10, test_size=0.3, train_size=0.7, random_state=19) heuristics = [random_h, entropy_h, margin_h, qbb_margin_h, qbb_kl_h, pool_variance_h, pool_entropy_h] thompson_path = '../pickle/07_thompson_sampling/vstatlas_balanced_logistic_thompson.pickle' pickle_paths = ['../pickle/06_active_learning/vstatlas_balanced_logistic_random.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_entropy.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_margin.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_qbb_margin.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_qbb_kl.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_pool_variance.pickle', '../pickle/06_active_learning/vstatlas_balanced_logistic_pool_entropy.pickle'] if not results_exist(pickle_paths): run_active_learning_expt(X_pool, y_pool, kfold, logistic, logistic_committee, heuristics, pickle_paths) bl_lcs, bl_sels = zip(*load_results(pickle_paths)) bl_lcs = list(bl_lcs) bl_lcs.append(load_results(thompson_path)) bl_df = order_learning_curves(bl_lcs, labels) bl_df.columns fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, bl_df, bl_df.columns[7:], colors, linestyles, upper=upper_l, ylim=(.83, 1)) fig.savefig(fig_dir+'5_active/vstatlas_bl_ind_lower.pdf', bbox_inches='tight') fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, bl_df, bl_df.columns[:8], colors, linestyles, upper=upper_l, ylim=(.83, 1)) fig.savefig(fig_dir+'5_active/vstatlas_bl_ind_upper.pdf', bbox_inches='tight') fig = plt.figure(figsize=(10, 2)) ax = plot_final_accuracy(bl_lcs, labels, colors, ylim=(.9, 1)) 
fig.savefig(fig_dir+'5_active/vstatlas_bl_ind_violin.pdf', bbox_inches='tight') ``` ## Balanced: SVM RBF ``` if running_expt: rbf = SVC(kernel='rbf', gamma=0.001, C=1000000, cache_size=2000, class_weight='auto', probability=True) rbf_committee = BaggingClassifier(rbf, n_estimators=11, n_jobs=-1, max_samples=300) X_pool, _, y_pool, _ = balanced_train_test_split(X, y, train_size=2360, test_size=0, random_state=11) kfold = StratifiedShuffleSplit(y_pool, n_iter=10, test_size=0.3, train_size=0.7, random_state=19) heuristics = [random_h, entropy_h, margin_h, qbb_margin_h, qbb_kl_h, pool_variance_h, pool_entropy_h] thompson_path = '../pickle/07_thompson_sampling/vstatlas_balanced_rbf_thompson.pickle' pickle_paths = ['../pickle/06_active_learning/vstatlas_balanced_rbf_random.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_entropy.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_margin.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_qbb_margin.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_qbb_kl.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_pool_variance.pickle', '../pickle/06_active_learning/vstatlas_balanced_rbf_pool_entropy.pickle'] if not results_exist(pickle_paths): run_active_learning_expt(X_pool, y_pool, kfold, rbf, rbf_committee, heuristics, pickle_paths) br_lcs, br_sels = zip(*load_results(pickle_paths)) br_lcs = list(br_lcs) br_lcs.append(load_results(thompson_path)) br_df = order_learning_curves(br_lcs, labels) br_df.columns fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, br_df, br_df.columns[4:], colors, linestyles, upper=upper_r, ylim=(.83, 1)) fig.savefig(fig_dir+'5_active/vstatlas_br_ind_lower.pdf', bbox_inches='tight') fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, br_df, br_df.columns[:5], colors, linestyles, upper=upper_r, ylim=(.83, 1)) fig.savefig(fig_dir+'5_active/vstatlas_br_ind_upper.pdf', bbox_inches='tight') fig = plt.figure(figsize=(10, 
2)) ax = plot_final_accuracy(br_lcs, labels, colors, ylim=(.9, 1)) fig.savefig(fig_dir+'5_active/vstatlas_br_ind_violin.pdf', bbox_inches='tight') ``` ## Unbalanced: Logistic Regression ``` if running_expt: logistic = LogisticRegression(multi_class='ovr', penalty='l1', C=100, random_state=2, class_weight='auto') logistic_committee = BaggingClassifier(logistic, n_estimators=11, n_jobs=-1, max_samples=300) kfold = StratifiedShuffleSplit(y, n_iter=10, test_size=0.3, train_size=0.7, random_state=19) heuristics = [random_h, entropy_h, margin_h, qbb_margin_h, qbb_kl_h, pool_variance_h, pool_entropy_h] thompson_path = '../pickle/07_thompson_sampling/vstatlas_unbalanced_logistic_thompson.pickle' pickle_paths = ['../pickle/06_active_learning/vstatlas_unbalanced_logistic_random.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_entropy.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_margin.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_qbb_margin.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_qbb_kl.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_pool_variance.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_logistic_pool_entropy.pickle'] if not results_exist(pickle_paths): run_active_learning_expt(X_poly, y, kfold, logistic, logistic_committee, heuristics, pickle_paths) ul_lcs, ul_sels = zip(*load_results(pickle_paths)) ul_lcs = list(ul_lcs) ul_lcs.append(load_results(thompson_path)) ul_df = order_learning_curves(ul_lcs, labels) ul_df.columns fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, ul_df, ul_df.columns[7:], colors, linestyles, upper=upper_l, ylim=(.68, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ul_ind_lower.pdf', bbox_inches='tight') fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, ul_df, ul_df.columns[:8], colors, linestyles, upper=upper_l, ylim=(.68, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ul_ind_upper.pdf', 
bbox_inches='tight') fig = plt.figure(figsize=(10, 2)) ax = plot_final_accuracy(ul_lcs, labels, colors, ylim=(.8, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ul_ind_violin.pdf', bbox_inches='tight') ``` ## Unbalanced: SVM RBF ``` if running_expt: rbf = SVC(kernel='rbf', gamma=0.001, C=1000000, cache_size=2000, class_weight='auto', probability=True) rbf_committee = BaggingClassifier(rbf, n_estimators=11, n_jobs=-1, max_samples=300) kfold = StratifiedShuffleSplit(y, n_iter=10, test_size=0.3, train_size=0.7, random_state=19) heuristics = [random_h, entropy_h, margin_h, qbb_margin_h, qbb_kl_h, pool_variance_h, pool_entropy_h] thompson_path = '../pickle/07_thompson_sampling/vstatlas_unbalanced_rbf_thompson.pickle' pickle_paths = ['../pickle/06_active_learning/vstatlas_unbalanced_rbf_random.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_entropy.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_margin.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_qbb_margin.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_qbb_kl.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_pool_variance.pickle', '../pickle/06_active_learning/vstatlas_unbalanced_rbf_pool_entropy.pickle'] if not results_exist(pickle_paths): run_active_learning_expt(X, y, kfold, rbf, rbf_committee, heuristics, pickle_paths) ur_lcs, ur_sels = zip(*load_results(pickle_paths)) ur_lcs = list(ur_lcs) ur_lcs.append(load_results(thompson_path)) ur_df = order_learning_curves(ur_lcs, labels) ur_df.columns fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, ur_df, ur_df.columns[4:], colors, linestyles, upper=upper_r, ylim=(.76, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ur_ind_lower.pdf', bbox_inches='tight') fig = plt.figure(figsize=(4, 4)) ax = plot_learning_curve_df(xrange, ur_df, ur_df.columns[:5], colors, linestyles, upper=upper_r, ylim=(.76, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ur_ind_upper.pdf', bbox_inches='tight') fig = 
plt.figure(figsize=(10, 2)) ax = plot_final_accuracy(ur_lcs, labels, colors, ylim=(.8, 1)) fig.savefig(fig_dir+'5_active/vstatlas_ur_ind_violin.pdf', bbox_inches='tight') ```
github_jupyter
# Tuples -> A tuple is similar to a list -> The difference between the two is that we can't change the elements of a tuple once it is assigned whereas in the list, elements can be changed # Tuple creation ``` #empty tuple t = () #tuple having integers t = (1, 2, 3) print(t) #tuple with mixed datatypes t = (1, 'raju', 28, 'abc') print(t) #nested tuple t = (1, (2, 3, 4), [1, 'raju', 28, 'abc']) print(t) #only parenthesis is not enough t = ('satish') type(t) #need a comma at the end t = ('satish',) type(t) #parenthesis is optional t = "satish", print(type(t)) print(t) ``` # Accessing Elements in Tuple ``` t = ('satish', 'murali', 'naveen', 'srinu', 'brahma') print(t[1]) #negative index print(t[-1]) #print last element in a tuple #nested tuple t = ('ABC', ('satish', 'naveen', 'srinu')) print(t[1]) print(t[1][2]) #Slicing t = (1, 2, 3, 4, 5, 6) print(t[1:4]) #print elements from starting to 2nd last elements print(t[:-2]) #print elements from starting to end print(t[:]) ``` # Changing a Tuple #unlike lists, tuples are immutable #This means that elements of a tuple cannot be changed once it has been assigned. But, if the element is itself a mutable datatype like list, its nested items can be changed. ``` #creating tuple t = (1, 2, 3, 4, [5, 6, 7]) t[2] = 'x' #will get TypeError t[4][1] = 'satish' print(t) #concatenating tuples t = (1, 2, 3) + (4, 5, 6) print(t) #repeat the elements in a tuple for a given number of times using the * operator. t = (('satish', ) * 4) print(t) ``` # Tuple Deletion ``` #we cannot change the elements in a tuple. # That also means we cannot delete or remove items from a tuple. 
#delete entire tuple using del keyword t = (1, 2, 3, 4, 5, 6) #delete entire tuple del t ``` # Tuple Count ``` t = (1, 2, 3, 1, 3, 3, 4, 1) #get the frequency with which a particular element appears in a tuple t.count(1) ``` # Tuple Index ``` t = (1, 2, 3, 1, 3, 3, 4, 1) print(t.index(3)) #return the index of the first element that is equal to 3 #print index of the 1 ``` # Tuple Membership ``` #test if an item exists in a tuple or not, using the keyword in. t = (1, 2, 3, 4, 5, 6) print(1 in t) print(7 in t) ``` # Built in Functions # Tuple Length ``` t = (1, 2, 3, 4, 5, 6) print(len(t)) ``` # Tuple Sort ``` t = (4, 5, 1, 2, 3) new_t = sorted(t) print(new_t) #Take elements in the tuple and return a new sorted list #(does not sort the tuple itself). #get the largest element in a tuple t = (2, 5, 1, 6, 9) print(max(t)) #get the smallest element in a tuple print(min(t)) #get sum of elements in the tuple print(sum(t)) ```
github_jupyter
``` import sys, os sys.path.insert(1, '/home/ning_a/Desktop/CAPTCHA/base_solver/base_solver_char') import numpy as np import torch from torch.autograd import Variable import captcha_setting import my_dataset from captcha_cnn_model import CNN, Generator from torchvision.utils import save_image import cv2 as cv from matplotlib import pyplot as plt from torch.utils.data import DataLoader, Dataset import torchvision.transforms as transforms from PIL import Image def gaussian_blur(img): image = np.array(img) image_blur = cv2.GaussianBlur(image,(65,65),10) new_image = image_blur return new_image class testdataset(Dataset): def __init__(self, folder, transform=None): self.train_image_file_paths = [os.path.join(folder, image_file) for image_file in os.listdir(folder)] self.transform = transform def __len__(self): return len(self.train_image_file_paths) def __getitem__(self, idx): image_root = self.train_image_file_paths[idx] image_name = image_root.split(os.path.sep)[-1] image = Image.open(image_root) image = image.resize((160,60), Image.ANTIALIAS) label = image_name #label = ohe.encode(image_name.split('_')[0]) # 为了方便,在生成图片的时候,图片文件的命名格式 "4个数字或者数字_时间戳.PNG", 4个字母或者即是图片的验证码的值,字母大写,同时对该值做 one-hot 处理 if self.transform is not None: image = self.transform(image) #label = self.transform(label) #label = ohe.encode(image_name.split('_')[0]) return image, label transform = transforms.Compose([ # transforms.ColorJitter(), transforms.Grayscale(), transforms.ToTensor(), # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def get_loader(): img_path = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/rescator_data/" img_path2 = "/home/ning_a/Desktop/CAPTCHA/base_GAN/train/" img_path3 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/mania_data/" img_path4 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/apollion_data/" img_path5 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/darkmarket_data/" img_path6 = 
"/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/elite_data_clean_singlecolor/" img_path7 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/apollion_data_clean_singlecolor/" img_path8 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/yellow_data/" dataset = testdataset(img_path8, transform=transform) return DataLoader(dataset, batch_size=1, shuffle=False) dataloader = get_loader() generator = Generator() generator.load_state_dict(torch.load('/home/ning_a/Desktop/CAPTCHA/base_solver/base_solver_char/7800.pkl')) generator.eval() print("load GAN net.") img_path = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/rescator_data/" img_path1 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/rescator_me/" img_path2 = "/home/ning_a/Desktop/CAPTCHA/base_GAN/train/" img_path4 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/apollion_data_clean/" img_path5 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/darkmarket_data/" img_path6 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/elite_data_clean_singlecolor/" img_path7 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/apollion_data_clean_singlecolor/" img_path8 = "/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/yellow_data/" # img = cv.imread(img_path+"33.png") dim = (160, 60) #img = cv.resize(img, dim, interpolation=cv.INTER_CUBIC) #img = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # plt.imshow(img) # plt.show() # img.shape from torch.autograd import Variable # img = torch.tensor(img) # img = img.float() # #print(img.type()) # new_img = generator(img).data.cpu().numpy() # plt.imshow(new_img[0][0]) # plt.show() # #print(img) label_target = "" for i, (imgs, label) in enumerate(dataloader): # plt.imshow(imgs[0][0]) # plt.show() if(i<20): continue print(label) label_target = label # imgs = Image.open('new_captcha.png') # imgs = imgs.resize((160,60), Image.ANTIALIAS) # plt.imshow(imgs) # plt.show() # imgs = transform(imgs) imgs = torch.tensor(imgs).float() new_img = generator(imgs) new_img2 = new_img.data.cpu().numpy() imgs2 = 
imgs.data.cpu().numpy() # print(new_img2.shape) # print(label) imgs2 = imgs2[0][0] imgs2 = imgs2*255 target_img = new_img2[0][0] # plt.imshow(new_img2[0][0]) # plt.show() target_img = target_img*255 #print() # gray = cv.cvtColor(target_img,cv.COLOR_BGR2GRAY) # ret, thresh = cv.threshold(target_img,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) cv.imwrite( "temp.jpg",imgs2) cv.imwrite( "temp_2.jpg",target_img) plt.imshow(imgs2) plt.show() plt.imshow(target_img) plt.show() # save_image(thresh, "/home/ning_a/Desktop/CAPTCHA/Solving_CAPTCHA_GAN_CNN/seged_rescator_clean/%s.png" % # label[0].split('.')[0], nrow=1, normalize=True) break # if(i>0): # break # img = cv.imread("8.png") # img = cv.imread('new_captcha.png') # img = Image.open('new_captcha.png') img = cv.imread('temp_2.jpg') img_t = cv.imread('temp_2.jpg') # img = 255-img plt.imshow(img) plt.show() #print(target_img) threshold = 5 # img = img.point(lambda p: p > threshold and 255) img = np.array(img) n_img = np.zeros((img.shape[0],img.shape[1])) img_aft = cv.normalize(img, n_img, 0,255,cv.NORM_MINMAX) plt.imshow(img_aft) plt.show() gray = cv.cvtColor(img_aft,cv.COLOR_BGR2GRAY) # gray = g ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) ret, thresh_reverse = cv.threshold(gray,0,255,cv.THRESH_OTSU) # print(type(target_img)) import copy from PIL import Image import operator im2,contours,hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE) print(contours[0]) filter_containor = [] temp_img = copy.deepcopy(img) cur_contours = [] #print(len(contours)) for i in contours: #print(i) x, y, w, h = cv.boundingRect(i) cur_contours.append([x, y, w, h]) #break contours = sorted(cur_contours, key=operator.itemgetter(0)) print(len(contours)) # print('>>>>>>>>>>') # print(max(contours[:][0]+contours[:][2])) for i in range(0,len(contours)): x = contours[i][0] y = contours[i][1] w = contours[i][2] h = contours[i][3] #= cv.boundingRect(contours[i]) newimage=img_t[y:y+h,x:x+w] # 
先用y确定高,再用x确定宽 nrootdir=("cut_image/") if (h<8) or (h*w<80) or (h*w >4000): continue color = [255, 255, 255] top, bottom, left, right = [1]*4 newimage = cv.copyMakeBorder(newimage, top, bottom, left, right, cv.BORDER_CONSTANT, value=color) #newimage = newimage = cv.resize(newimage,(30, 60), interpolation = cv.INTER_CUBIC) #filter_containor.append(newimage) cv.rectangle(img, (x,y), (x+w,y+h), (153,153,0), 1) if not os.path.isdir(nrootdir): os.makedirs(nrootdir) cv.imwrite( nrootdir+str(i)+".jpg",newimage) cv.imwrite( "temp.jpg",newimage) if(i==len(contours)-1): cur_temp_img = Image.open("temp.jpg") # filter_containor.append(cur_temp_img[]) filter_containor.append(Image.open("temp.jpg")) print (x, y, w, h) print(filter_containor) plt.imshow(img) plt.show() import torch from torch.autograd import Variable import torch.nn as nn import captcha_setting import operator import torchvision.transforms as transforms from PIL import Image #for eachimg in filter_containor: class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.layer1 = nn.Sequential( nn.Conv2d(1, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.Dropout(0.1), # drop 50% of the neuron nn.ReLU(), nn.MaxPool2d(2)) self.layer2 = nn.Sequential( nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.Dropout(0.1), # drop 50% of the neuron nn.ReLU(), nn.MaxPool2d(2)) self.layer3 = nn.Sequential( nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.Dropout(0.1), # drop 50% of the neuron nn.ReLU(), nn.MaxPool2d(2)) self.fc = nn.Sequential( nn.Linear((captcha_setting.IMAGE_WIDTH//8)*(captcha_setting.IMAGE_HEIGHT//8)*64, 1024), nn.Dropout(0.1), # drop 50% of the neuron nn.ReLU()) self.rfc = nn.Sequential( nn.Linear(1024, 256),#captcha_setting.MAX_CAPTCHA*captcha_setting.ALL_CHAR_SET_LEN), nn.ReLU() ) self.rfc2 = nn.Sequential( nn.Linear(256, 36), ) def forward(self, x): out = self.layer1(x) out = self.layer2(out) out = self.layer3(out) out = out.view(out.size(0), -1) out = 
self.fc(out) #print(out.shape) out = self.rfc(out) out = self.rfc2(out) #out = out.view(out.size(0), -1) #print(out.shape) return out device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") cnn = CNN() cnn.eval() cnn.load_state_dict(torch.load('model_digit.pkl')) cnn.to(device) transform = transforms.Compose([ # transforms.ColorJitter(), transforms.Grayscale(), transforms.ToTensor(), # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) label_predicted = "" for eachimg in filter_containor: #print(eachimg) image = transform(eachimg).unsqueeze(0) plt.imshow(eachimg) plt.show() print(image.shape) image = torch.tensor(image, device=device).float() image = Variable(image).to(device) #print(image.shape) #image, labels = image.to(device), labels.to(device) # vimage = generator(image) predict_label = cnn(image) #labels = labels.cpu() predict_label = predict_label.cpu() _, predicted = torch.max(predict_label, 1) print(captcha_setting.ALL_CHAR_SET[predicted]) label_predicted += captcha_setting.ALL_CHAR_SET[predicted] label_predicted img = cv.imread('/home/ning_a/Desktop/CAPTCHA/Solving_CAPTCHA_GAN_CNN/seged_rescator/245.png') #ret, thresh = cv.threshold(test_img,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) #ret, thresh = cv.threshold(gray,0,255,cv.THRESH_OTSU) plt.imshow(img) plt.show() #img = cv.imread(SAMPLE6_PATH) # plt.imshow(img) # plt.show() n_img = np.zeros((img.shape[0],img.shape[1])) img_aft = cv.normalize(img, n_img, 0,255,cv.NORM_MINMAX) plt.imshow(img_aft) plt.show() gray = cv.cvtColor(img_aft,cv.COLOR_BGR2GRAY) plt.imshow(gray) plt.show() ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) #ret, thresh = cv.threshold(gray,0,255,cv.THRESH_OTSU) plt.imshow(thresh) plt.show() import copy im2,contours,hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE) filter_containor = [] temp_img = copy.deepcopy(img) for i in range(0,len(contours)): x, y, w, h = cv.boundingRect(contours[i]) 
newimage=img[y:y+h,x:x+w] # 先用y确定高,再用x确定宽 nrootdir=("cut_image/") if h<5 and w<5: continue filter_containor.append([x, y, w, h]) cv.rectangle(temp_img, (x,y), (x+w,y+h), (153,153,0), 1) if not os.path.isdir(nrootdir): os.makedirs(nrootdir) cv.imwrite( nrootdir+str(i)+".jpg",newimage) print (x, y, w, h) plt.imshow(temp_img) plt.show() ```
github_jupyter
# EventVestor: Index Changes In this notebook, we'll take a look at EventVestor's *Index Changes* dataset, available on the [Quantopian Store](https://www.quantopian.com/store). This dataset spans January 01, 2007 through the current day, and documents index additions and deletions to major S&P, Russell, and Nasdaq 100 indexes. ### Blaze Before we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as [Blaze](http://blaze.pydata.org). Blaze provides the Quantopian user with a convenient interface to access very large datasets. Blaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side. It is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization. Helpful links: * [Query building for Blaze](http://blaze.pydata.org/en/latest/queries.html) * [Pandas-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-pandas.html) * [SQL-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-sql.html). Once you've limited the size of your Blaze object, you can convert it to a Pandas DataFrame using: > `from odo import odo` > `odo(expr, pandas.DataFrame)` ### Free samples and limits One other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze. There is a *free* version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day. 
With preamble in place, let's get started: ``` # import the dataset from quantopian.interactive.data.eventvestor import index_changes # or if you want to import the free dataset, use: # from quantopian.interactive.data.eventvestor import index_changes_free # import data operations from odo import odo # import other libraries we will use import pandas as pd # Let's use blaze to understand the data a bit using Blaze dshape() index_changes.dshape # And how many rows are there? # N.B. we're using a Blaze function to do this, not len() index_changes.count() # Let's see what the data looks like. We'll grab the first three rows. index_changes[:3] ``` Let's go over the columns: - **event_id**: the unique identifier for this event. - **asof_date**: EventVestor's timestamp of event capture. - **trade_date**: for event announcements made before trading ends, trade_date is the same as event_date. For announcements issued after market close, trade_date is next market open day. - **symbol**: stock ticker symbol of the affected company. - **event_type**: this should always be *Index Change*. - **event_headline**: a brief description of the event - **index_name**: name of the index affected. Values include *S&P 400, S&P 500, S&P 600* - **change_type**: Addition/Deletion of equity - **change_reason**: reason for addition/deletion of the equity from the index. Reasons include *Acquired, Market Cap, Other*. - **event_rating**: this is always 1. The meaning of this is uncertain. - **timestamp**: this is our timestamp on when we registered the data. - **sid**: the equity's unique identifier. Use this instead of the symbol. Note: this sid represents the company the shares of which are being purchased, not the acquiring entity. We've done much of the data processing for you. Fields like `timestamp` and `sid` are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the `sid` across all our equity databases. 
We can select columns and rows with ease. Below, we'll fetch all 2015 deletions due to market cap. ``` deletions = index_changes[('2014-12-31' < index_changes['asof_date']) & (index_changes['asof_date'] <'2016-01-01') & (index_changes.change_type == "Deletion")& (index_changes.change_reason == "Market Cap")] # When displaying a Blaze Data Object, the printout is automatically truncated to ten rows. deletions.sort('asof_date') ``` Now suppose we want a DataFrame of the Blaze Data Object above, want to filter it further down to the S&P 600, and we only want the sid and the asof_date. ``` df = odo(deletions, pd.DataFrame) df = df[df.index_name == "S&P 600"] df = df[['sid', 'asof_date']] df ```
github_jupyter
## Matching catalogues to the VAST Pilot Survey This notebook gives an example of how to use vast-tools in a notebook environment to perform a crossmatch between a catalogue and the VAST Pilot Survey. **Note** The settings and filters applied in this notebook, while sensible, are somewhat generic - always consider your science goals when deciding what filters to apply! It is **highly recommended** that results from the VAST Pipeline are used and this is what will be primarily covered in this example. It is possible to run a search just using vast-tools but the results are nowhere near as rich as the pipeline - this is covered at the end of this document. ### The VAST Pipeline The pipeline hosted on the Nimbus server will contain the pipeline run for the full pilot survey. For a complete demo of what can be done with the vast-tools `Pipeline` class see the `vast-pipeline-example.ipynb` example notebook. ### The Imports Below are the imports required for this example. The main imports required from vast-tools are the Pipeline and VASTMOCS objects. The Query object is for the vast-tools query option that is shown at the end of this notebook. Astropy objects are also imported as they are critical to performing the crossmatch. ``` from vasttools.moc import VASTMOCS from vasttools.pipeline import Pipeline from vasttools.query import Query from mocpy import World2ScreenMPL import matplotlib.pyplot as plt from astropy import units as u from astropy.coordinates import Angle, SkyCoord ``` ### Catalogue selection For this example we will be using the `Quasars and Active Galactic Nuclei (13th Ed.) (Veron+ 2010)` catalogue, which has the Vizier ID of `VII/258`. _**Note:** Of course your catalogue doesn't have to come from Vizier. 
If you have a `csv` or `FITS` file then simply load this data into a DataFrame, create a SkyCoord object and you'll be good to go._ To start our search, the first question we want to answer is: *What sources from the catalogue are in the VAST Pilot Survey footprint?* This can be efficiently answered by using the `query_vizier_vast_pilot()` method in VASTMOCS. First we initialise the VASTMOCS object: ``` mocs = VASTMOCS() ``` We then use the query vizier method to obtain all the sources from the Veron catalogue which are contained within the footprint. It will likely take a bit of time to complete. ``` veron_vast_sources = mocs.query_vizier_vast_pilot('VII/258', max_rows=200000) veron_vast_sources ``` We see that 44,704 sources are within the VAST Pilot Survey footprint. _**Tip:** The table returned above is an astropy.table. This can be converted to pandas by using `veron_vast_sources = veron_vast_sources.to_pandas()`._ These can be plotted along with the VAST Pilot Survey footprint using the MOC. See the vast-mocs-example notebook for more on using MOCS and the `Wordl2ScreenMPL` method. ``` from astropy.visualization.wcsaxes.frame import EllipticalFrame fig = plt.figure(figsize=(16,8)) # Load the Epoch 1 MOC file to use epoch1_moc = mocs.load_pilot_epoch_moc('1') # with World2ScreenMPL( fig, fov=320 * u.deg, center=SkyCoord(0, 0, unit='deg', frame='icrs'), coordsys="icrs", rotation=Angle(0, u.degree), ) as wcs: ax = fig.add_subplot(111, projection=wcs, frame_class=EllipticalFrame) ax.set_title("Veron Catalogue Sources in the VAST Pilot Survey") ax.grid(color="black", linestyle="dotted") epoch1_moc.fill(ax=ax, wcs=wcs, alpha=0.5, fill=True, linewidth=0, color="#00bb00") epoch1_moc.border(ax=ax, wcs=wcs, alpha=0.5, color="black") ax.scatter( veron_vast_sources['_RAJ2000'], veron_vast_sources['_DEJ2000'], transform=ax.get_transform('world'), zorder=10, s=3 ) fig ``` ### Loading the VAST Pipeline Data Now the results of the VAST Pipeline need to be loaded. 
This example will not give full details of the Pipeline class, but please refer to the `vast-pipeline-example.ipynb` example notebook for a full example and description. We'll be using the full VAST Pilot Survey pipeline containing epochs 0–13 (a test version called `tiles_corrected`). ``` # below I suppress DeprecationWarnings due to ipykernel bug and an astropy warning due to FITS header warnings. import warnings from astropy.utils.exceptions import AstropyWarning warnings.simplefilter('ignore', category=AstropyWarning) warnings.filterwarnings("ignore", category=DeprecationWarning) # define pipeline object pipe = Pipeline() # load the run pipe_run = pipe.load_run('tiles_corrected') ``` We now have access to the unique sources found by the pipeline: ``` pipe_run.sources.head() ``` ### Performing the Crossmatch The crossmatch can be performed using the astropy `match_to_catalog_sky` function. The first step is to create the sky coord objects for each catalogue. First the Veron catalog which was already obtained above: ``` # Unfortunately we cannot use guess_from_table for the Vizier results, so we construct manually veron_skycoord = SkyCoord(veron_vast_sources['_RAJ2000'], veron_vast_sources['_DEJ2000'], unit=(u.deg, u.deg)) veron_names = veron_vast_sources['Name'].tolist() ``` and then by default the pipeline run object has the default sources saved as a sky coord object as `pipe_run.sources_skycoord`: ``` pipe_run.sources_skycoord ``` Now the crossmatching can be performed. See https://docs.astropy.org/en/stable/coordinates/matchsep.html#astropy-coordinates-matching for details on the astropy functions and outputs. ``` idx, d2d, d3d = veron_skycoord.match_to_catalog_sky(pipe_run.sources_skycoord) radius_limit = 15 * u.arcsec (d2d <= radius_limit).sum() ``` From above we can see that 5048 Veron objects have a match to the pipeline sources. 
If you wish you could merge the results together: ``` # Convert Veron to pandas first veron_vast_sources_pd = veron_vast_sources.to_pandas() # Create a d2d mask d2d_mask = d2d <= radius_limit # Select the crossmatches less than 15 arcsec veron_crossmatch_result_15asec = veron_vast_sources_pd.loc[d2d_mask].copy() # Append the id and distance of the VAST crossmatch to the veron sources veron_crossmatch_result_15asec['vast_xmatch_id'] = pipe_run.sources.iloc[idx[d2d_mask]].index.values veron_crossmatch_result_15asec['vast_xmatch_d2d_asec'] = d2d[d2d_mask].arcsec # Join the result veron_crossmatch_result_15asec = veron_crossmatch_result_15asec.merge(pipe_run.sources, how='left', left_on='vast_xmatch_id', right_index=True, suffixes=("_veron", "_vast")) veron_crossmatch_result_15asec ``` With the crossmatches in hand you can now start to do any kind of analysis you wish to perform. For example we can perform a quick check to see if the pipeline has picked out any of these sources as having significant two-epoch variability: ``` veron_crossmatch_result_15asec[veron_crossmatch_result_15asec['m_abs_significant_max_peak'] > 0.00] ``` And remember you can use the vast-toools source tools to view any source as in the other example notebooks: ``` # Get the first VAST source above from the table above first_source_id = veron_crossmatch_result_15asec[veron_crossmatch_result_15asec['m_abs_significant_max_peak'] > 0.00].iloc[0].vast_xmatch_id first_source = pipe_run.get_source(first_source_id) first_source.plot_lightcurve(min_points=1) first_source.show_all_png_cutouts(columns=5, figsize=(12,7), size=Angle(2. * u.arcmin)) ``` ### Filtering the Pipeline Sources (Optional) The example above has used all the sources from the pipeline results, but these may need to be filtered further to improve results. For example Below is an example of how to filter the sources. ``` my_query_string = ( "n_measurements >= 3 " "& n_selavy >= 2 " "& n_neighbour_dist > 1./60. 
" "& 0.8 < avg_compactness < 1.4 " "& n_relations == 0 " "& max_snr > 7.0" ) pipe_run_filtered_sources = pipe_run.sources.query(my_query_string) pipe_run_filtered_sources ``` You can either: * apply this to the crossmatch results above, or * substitute `pipe_run_filtered_sources` into the complete crossmatch process above in the place of `my_run.sources` (you need to create a new SkyCoord object first). ``` pipe_run_filtered_sources_skycoord = pipe_run.get_sources_skycoord(user_sources=pipe_run_filtered_sources) pipe_run_filtered_sources_skycoord ``` ### Finding All Crossmatches Between Sources The crossmatch above only finds the nearest neighbour to the sources in your catalogue. Astropy also offers the functionality to find all matches between objects within a defined radius. See https://docs.astropy.org/en/stable/coordinates/matchsep.html#searching-around-coordinates for full details. This is done by performing the below, using the 15 arcsec radius: ``` idx_vast, idx_veron, d2d, d3d = veron_skycoord.search_around_sky(pipe_run.sources_skycoord, 15 * u.arcsec) ``` A merged dataframe of this crossmatch can be made like that below. Note there are multiple matches to sources so this will generate duplicate sources within the dataframe. ``` # Create a subset dataframe of the Veron sources with a match veron_search_around_results_15asec = veron_vast_sources_pd.iloc[idx_veron].copy() # Add the VAST d2d and match id columns veron_search_around_results_15asec['vast_xmatch_d2d_asec'] = d2d.arcsec veron_search_around_results_15asec['vast_xmatch_id'] = pipe_run.sources.iloc[idx_vast].index.values # Perform the merge veron_search_around_results_15asec = veron_search_around_results_15asec.merge(pipe_run.sources, how='left', left_on='vast_xmatch_id', right_index=True, suffixes=("_veron", "_vast")) veron_search_around_results_15asec ``` This is the end of the example of performing a catalogue crossmatch using the VAST Pipeline. 
The information below this point is about using the vast-tools query method to find sources from the pilot survey if a pipeline run is not available. A pipeline run should be used whenever possible due to the superior quality of data it generates. ## Find VAST Matches Using VAST Tools If a pipeline run isn't available you can use VAST Tools to match to the **VAST Pilot Survey only**. Here the same Veron dataframe that was created in the pipeline section above is used. The first step is to construct a Query to see how many sources have matches to selavy components in the VAST Pilot Survey. In the Query definition below we use the `matches_only` argument. This means that only those sources that have an actual match are returned. I also explicitly do not select RACS to search here, I'm only interested in the VAST Pilot data, so I select `all-vast`. Note you must pre-create the output directory for the query if you intend to use it. ``` veron_query = Query( coords=veron_skycoord, source_names=veron_names, epochs='all-vast', max_sep=1.5, crossmatch_radius=10.0, base_folder='/data/vast-survey/pilot/', matches_only=True, no_rms=True, output_dir='veron-vast-crossmatching', ) ``` And run `find_sources` - again a warning that this will take a little while to process. ``` veron_query.find_sources() ``` We can check the results attribute to see how many sources return a match. ``` veron_query.results.shape[0] ``` ### Using the results 4664 sources have returned a match in the VAST Pilot Survey in any epoch. We can create new skycoord and name objects ready for a new query: ``` matches_mask = [i in (veron_query.results) for i in veron_vast_sources['Name']] matched_names = veron_vast_sources['Name'][matches_mask].tolist() matched_skycoords = veron_skycoord[matches_mask] ``` Or loop through and save all the measurements for each source. 
``` # for i in veron_query.results: # i.write_measurements() ``` While you can explore the sources as normal, for example ``` my_source = veron_query.results['1AXG J134412+0016'] lc = my_source.plot_lightcurve() lc cutout = my_source.show_png_cutout('1') cutout ``` it's not recommended to produce cut outs for all sources in the notebook as this will start to take a lot of memory and be quite slow. If you'd like to do this then please use the `find_sources.py` script. ### VAST Tools Variability Unlike the Pipeline, the sources returned using this method do not contain any of the calculated metrics. However, you can also perform some rudimentary variability analysis on the results if you wish. I would recommend using the VAST Pipeline if possible for this kind of analysis as the associations will be much better and you'll get a lot more information, but nevertheless this is an example of what you **can** do with the data from vast-tools. In the code below I create a dataframe from the query results (which is a pandas series) and assign it to `variables_df` and define a function that returns the eta and V metrics for each source when passed through `.apply()`. These are then assigned to new `eta` and `v` columns in the `variables_df` dataframe. ``` import pandas as pd def get_variable_metrics(row): """ Function to return the eta and v metrics using apply. """ return row['object'].calc_eta_and_v_metrics() # create the variables_df dataframe, rename the column holding the objects as 'object' variables_df = pd.DataFrame(veron_query.results).rename(columns={'name':'object'}) # obtain the metrics variables_df[['eta', 'v']] = variables_df.apply(get_variable_metrics, result_type='expand', axis=1) ``` We can then, for example, plot the log eta distribution, making sure we choose sources that have more than 2 detections. 
``` %matplotlib inline mask = [i.detections > 2 for i in variables_df['object']] import numpy as np np.log10(variables_df.eta[mask]).hist(bins=100) plt.show() ``` You could then do the same for `v` and start to fit Gaussians to the distributions and select candidates. **Note** for large queries it is recommended to use the script version of `find_sources.py` to get cutouts for **all** results.
github_jupyter
# Harvesting collections of text from archived web pages <p class="alert alert-info">New to Jupyter notebooks? Try <a href="getting-started/Using_Jupyter_notebooks.ipynb"><b>Using Jupyter notebooks</b></a> for a quick introduction.</p> This notebook helps you assemble datasets of text extracted from all available captures of archived web pages. You can then feed these datasets to the text analysis tool of your choice to analyse changes over time. ### Harvest sources * Timemaps – harvest text from a single url, or list of urls, using the repository of your choice * CDX API – harvest text from the results of a query to the Internet Archive's CDX API ### Options * `filter_text=False` (default) – save all of the human visible text on the page, this includes boilerplate, footers, and navigation text. * `filter_text=True` – save only the significant text on the page, excluding recurring items like boilerplate and navigation. This is done by [Trafilatura](https://trafilatura.readthedocs.io/en/latest/index.html). ### Usage #### Using Timemaps ``` python save_texts_from_url([timegate], [url], filter_text=[True or False]) ``` The `timegate` value should be one of: * `nla` – National Library of Australia * `nlnz` – National Library of New Zealand * `bl` – UK Web Archive * `ia` – Internet Archive #### Using the Internet Archive's CDX API Use a CDX query to find all urls that include the specified keyword in their url. 
``` python save_texts_from_cdx_query([url], filter_text=[True or False], filter=['original:.*[keyword].*', 'statuscode:200', 'mimetype:text/html']) ``` The `url` value can use wildcards to indicate whether it is a domain or prefix query, for example: * `nla.gov.au/*` – prefix query, search all files under `nla.gov.au` * `*.nla.gov.au` – domain query, search all files under `nla.gov.au` and any of its subdomains You can use any of the keyword parameters that the CDX API recognises, but you probably want to filter for `statuscode` and `mimetype` and apply some sort of regular expression to `original`. ### Output A directory will be created for each url processed. The name of the directory will be a slugified version of the url in SURT (Sort-friendly URI Reordering Transform) format. Each text file will be saved separately within the directory. Filenames follow the pattern: ``` [SURT formatted url]-[capture timestamp].txt ``` There's also a `metadata.json` file that includes basic details of the harvest: * `timegate` - the repository used * `url` – the url harvested * `filter_text` – text filtering option used * `date` – date and time the harvest was started * `mementos` – details of each capture, including: * `url` – link to capture in web archive * `text_file` – path to harvested text file ## Import what we need ``` import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry import re import pandas as pd from bs4 import BeautifulSoup from surt import surt from pathlib import Path from slugify import slugify from tqdm.auto import tqdm import trafilatura import arrow import json import time from lxml.etree import ParserError from IPython.display import display, FileLink, FileLinks s = requests.Session() retries = Retry(total=10, backoff_factor=1, status_forcelist=[ 502, 503, 504 ]) s.mount('https://', HTTPAdapter(max_retries=retries)) s.mount('http://', HTTPAdapter(max_retries=retries)) # Default list of repositories 
-- you could add to this TIMEGATES = { 'nla': 'https://web.archive.org.au/awa/', 'nlnz': 'https://ndhadeliver.natlib.govt.nz/webarchive/wayback/', 'bl': 'https://www.webarchive.org.uk/wayback/archive/', 'ia': 'https://web.archive.org/web/' } ``` ## Define some functions ``` def is_memento(url): ''' Is this url a Memento? Checks for the presence of a timestamp. ''' return bool(re.search(r'/\d{14}(?:id_|mp_|if_)*/http', url)) def get_html(url): ''' Retrieve the original HTML content of an archived page. Follow redirects if they go to another archived page. Return the (possibly redirected) url from the response and the HTML content. ''' # Adding the id_ hint tells the archive to give us the original harvested version, without any rewriting. url = re.sub(r'/(\d{14})(?:mp_)*/http', r'/\1id_/http', url) response = requests.get(url, allow_redirects=True) # Some captures might redirect themselves to live versions # If the redirected url doesn't look like a Memento rerun this without redirection if not is_memento(response.url): response = requests.get(url, allow_redirects=False) return {'url': response.url, 'html': response.content} def convert_lists_to_dicts(results): ''' Converts IA style timemap (a JSON array of arrays) to a list of dictionaries. Renames keys to standardise IA with other Timemaps. ''' if results: keys = results[0] results_as_dicts = [dict(zip(keys, v)) for v in results[1:]] else: results_as_dicts = results # Rename keys for d in results_as_dicts: d['status'] = d.pop('statuscode') d['mime'] = d.pop('mimetype') d['url'] = d.pop('original') return results_as_dicts def get_capture_data_from_memento(url, request_type='head'): ''' For OpenWayback systems this can get some extra cpature info to insert in Timemaps. 
''' if request_type == 'head': response = requests.head(url) else: response = requests.get(url) headers = response.headers length = headers.get('x-archive-orig-content-length') status = headers.get('x-archive-orig-status') status = status.split(' ')[0] if status else None mime = headers.get('x-archive-orig-content-type') mime = mime.split(';')[0] if mime else None return {'length': length, 'status': status, 'mime': mime} def convert_link_to_json(results, enrich_data=False): ''' Converts link formatted Timemap to JSON. ''' data = [] for line in results.splitlines(): parts = line.split('; ') if len(parts) > 1: link_type = re.search(r'rel="(original|self|timegate|first memento|last memento|memento)"', parts[1]).group(1) if link_type == 'memento': link = parts[0].strip('<>') timestamp, original = re.search(r'/(\d{14})/(.*)$', link).groups() capture = {'timestamp': timestamp, 'url': original} if enrich_data: capture.update(get_capture_data_from_memento(link)) data.append(capture) return data def get_timemap_as_json(timegate, url): ''' Get a Timemap then normalise results (if necessary) to return a list of dicts. ''' tg_url = f'{TIMEGATES[timegate]}timemap/json/{url}/' response = requests.get(tg_url) response_type = response.headers['content-type'] # pywb style Timemap if response_type == 'text/x-ndjson': data = [json.loads(line) for line in response.text.splitlines()] # IA Wayback stype Timemap elif response_type == 'application/json': data = convert_lists_to_dicts(response.json()) # Link style Timemap (OpenWayback) elif response_type in ['application/link-format', 'text/html;charset=utf-8']: data = convert_link_to_json(response.text) return data def get_all_text(capture_data): ''' Get all the human visible text from a web page, including headers, footers, and navigation. Does some cleaning up to remove multiple spaces, tabs, and newlines. 
''' try: text = BeautifulSoup(capture_data['html']).get_text() except TypeError: return None else: # Remove multiple newlines text = re.sub(r'\n\s*\n', '\n\n', text) # Remove multiple spaces or tabs with a single space text = re.sub(r'( |\t){2,}', ' ', text) # Remove leading spaces text = re.sub(r'\n ', '\n', text) # Remove leading newlines text = re.sub(r'^\n*', '', text) return text def get_main_text(capture_data): ''' Get only the main text from a page, excluding boilerplate and navigation. ''' try: text = trafilatura.extract(capture_data['html']) except ParserError: text = '' return text def get_text_from_capture(capture_url, filter_text=False): ''' Get text from the given memento. If filter_text is True, only return the significant text (excluding things like navigation). ''' capture_data = get_html(capture_url) if filter_text: text = get_main_text(capture_data) else: text = get_all_text(capture_data) return text def process_capture_list(timegate, captures, filter_text=False, url=None): if not url: url = captures[0]['url'] metadata = { 'timegate': TIMEGATES[timegate], 'url': url, 'filter_text': filter_text, 'date': arrow.now().format('YYYY-MM-DD HH:mm:ss'), 'mementos': [] } try: urlkey = captures[0]['urlkey'] except KeyError: urlkey = surt(url) # Truncate urls longer than 50 chars so that filenames are not too long output_dir = Path('text', slugify(urlkey)[:50]) output_dir.mkdir(parents=True, exist_ok=True) for capture in tqdm(captures, desc='Captures'): file_path = Path(output_dir, f'{slugify(urlkey)[:50]}-{capture["timestamp"]}.txt') # Don't reharvest if file already exists if not file_path.exists(): # Only process successful captures (or all for NLNZ) if timegate == 'nlnz' or capture['status'] == '200': capture_url = f'{TIMEGATES[timegate]}{capture["timestamp"]}id_/{capture["url"]}' capture_text = get_text_from_capture(capture_url, filter_text) if capture_text: # Truncate urls longer than 50 chars so that filenames are not too long file_path = 
Path(output_dir, f'{slugify(urlkey)[:50]}-{capture["timestamp"]}.txt') file_path.write_text(capture_text) metadata['mementos'].append({'url': capture_url, 'text_file': str(file_path)}) time.sleep(0.2) metadata_file = Path(output_dir, 'metadata.json') with metadata_file.open('wt') as md_json: json.dump(metadata, md_json) def save_texts_from_url(timegate, url, filter_text=False): ''' Save the text contents of all available captures for a given url from the specified repository. Saves both the harvested text files and a json file with the harvest metadata. ''' timemap = get_timemap_as_json(timegate, url) if timemap: process_capture_list(timegate, timemap, url=url, filter_text=filter_text) def prepare_params(url, **kwargs): ''' Prepare the parameters for a CDX API requests. Adds all supplied keyword arguments as parameters (changing from_ to from). Adds in a few necessary parameters. ''' params = kwargs params['url'] = url params['output'] = 'json' params['pageSize'] = 5 # CDX accepts a 'from' parameter, but this is a reserved word in Python # Use 'from_' to pass the value to the function & here we'll change it back to 'from'. if 'from_' in params: params['from'] = params['from_'] del(params['from_']) return params def get_total_pages(params): ''' Get number of pages in a query. Note that the number of pages doesn't tell you much about the number of results, as the numbers per page vary. ''' these_params = params.copy() these_params['showNumPages'] = 'true' response = s.get('http://web.archive.org/cdx/search/cdx', params=these_params, headers={'User-Agent': ''}) return int(response.text) def get_cdx_data(params): ''' Make a request to the CDX API using the supplied parameters. Return results converted to a list of dicts. 
''' response = s.get('http://web.archive.org/cdx/search/cdx', params=params) response.raise_for_status() results = response.json() try: if not response.from_cache: time.sleep(0.2) except AttributeError: # Not using cache time.sleep(0.2) return convert_lists_to_dicts(results) def harvest_cdx_query(url, **kwargs): ''' Harvest results of query from the IA CDX API using pagination. Returns captures as a list of dicts. ''' results = [] page = 0 params = prepare_params(url, **kwargs) total_pages = get_total_pages(params) with tqdm(total=total_pages-page, desc='CDX') as pbar: while page < total_pages: params['page'] = page results += get_cdx_data(params) page += 1 pbar.update(1) return results def save_texts_from_cdx_query(url, filter_text=False, **kwargs): captures = harvest_cdx_query(url, **kwargs) if captures: df = pd.DataFrame(captures) groups = df.groupby(by='urlkey') print(f'{len(groups)} matching urls') for name, group in groups: process_capture_list('ia', group.to_dict('records'), filter_text=filter_text) ``` ## Harvesting a single url or list of urls You don't have to run all of these cases. Any case will illustrate what is going on. The output is in directory 'text'. Case 4 may be particularly time consuming and run out of resources. Get all human-visible text from all captures of a single url, https://covid19.nih.gov/ ``` # CASE 1 -- save unfiltered text from https://covid19.nih.gov/ # After results were saved in 'text' directory, renamed the directory to 'text_CASE1_Unfiltered' save_texts_from_url('ia', 'https://covid19.nih.gov/', filter_text=False) ``` Get only significant text from all captures of a single url, https://covid19.nih.gov/ ``` # CASE 2 -- save filtered text from https://covid19.nih.gov/ (This may overwrite the results from CASE 1 if you use the same URL.) 
# After results were saved in 'text' directory, renamed the directory to 'text_CASE2_Filtered' save_texts_from_url('ia', 'https://covid19.nih.gov/', filter_text=True) ``` Harvest text from a series of urls. ``` # CASE 3 -- save filtered text from the URLs listed below. # This may overwrite the results from CASE 1 and CASE 2 if there is overlap of the URLs. # After results were saved in 'text' directory, renamed the directory to 'text_CASE3_3SampleUrls' urls = [ 'https://combatcovid.hhs.gov/', 'https://www.cdc.gov/vaccines/covid-19/vaccinate-with-confidence.html', 'https://www.hhs.gov/coronavirus/education-campaign/' ] for url in urls: save_texts_from_url('ia', url, filter_text=True) ``` ## Harvesting matching pages from a domain Harvest text from all pages under the `www.nih.gov` domain that include the word `mask` in the url. Note the use of the regular expression `.*mask.*` to match the `original` url. ``` # CASE 4 -- save filtered text from html URLs matching www.nih.gov/*mask* # This may overwrite the results from CASE 1, 2 and 3 above if the URLs overlap with them. # This case takes a LONG time to run. And you don't know when it will finish, if ever. save_texts_from_cdx_query('www.nih.gov/*', filter_text=True, filter=['original:.*mask.*', 'statuscode:200', 'mimetype:text/html']) ``` ## Viewing and downloading the results If you're using Jupyter Lab, you can browse the results of this notebook by just looking inside the `domains` folder. I've also enabled the `jupyter-archive` extension which adds a download option to the right-click menu. Just right click on a folder and you'll see an option to 'Download as an Archive'. This will zip up and download the folder. The cells below provide a couple of alternative ways of viewing and downloading the results. ``` # OPTIONAL. 
# Display all the files under the current domain folder (this could be a long list) display(FileLinks('text')) # OPTIONAL # Tar/gzip the current domain folder !tar -czf text.tar.gz text # OPTIONAL # Display a link to the gzipped data # In JupyterLab you'll need to Shift+right-click on the link and choose 'Download link' display(FileLink('text.tar.gz')) ``` ---- Created by [Tim Sherratt](https://timsherratt.org) for the [GLAM Workbench](https://glam-workbench.github.io). Work on this notebook was supported by the [IIPC Discretionary Funding Programme 2019-2020](http://netpreserve.org/projects/)
github_jupyter
# Name Batch prediction using Cloud Machine Learning Engine # Label Cloud Storage, Cloud ML Engine, Kubeflow, Pipeline, Component # Summary A Kubeflow Pipeline component to submit a batch prediction job against a deployed model on Cloud ML Engine. # Details ## Intended use Use the component to run a batch prediction job against a deployed model on Cloud ML Engine. The prediction output is stored in a Cloud Storage bucket. ## Runtime arguments | Argument | Description | Optional | Data type | Accepted values | Default | |--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|-----------------|---------| | project_id | The ID of the Google Cloud Platform (GCP) project of the job. | No | GCPProjectID | | | | model_path | The path to the model. It can be one of the following:<br/> <ul> <li>projects/[PROJECT_ID]/models/[MODEL_ID]</li> <li>projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]</li> <li>The path to a Cloud Storage location containing a model file.</li> </ul> | No | GCSPath | | | | input_paths | The path to the Cloud Storage location containing the input data files. It can contain wildcards, for example, `gs://foo/*.csv` | No | List | GCSPath | | | input_data_format | The format of the input data files. See [REST Resource: projects.jobs](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat) for more details. | No | String | DataFormat | | | output_path | The path to the Cloud Storage location for the output data. | No | GCSPath | | | | region | The Compute Engine region where the prediction job is run. | No | GCPRegion | | | | output_data_format | The format of the output data files. See [REST Resource: projects.jobs](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat) for more details. 
| Yes | String | DataFormat | JSON | | prediction_input | The JSON input parameters to create a prediction job. See [PredictionInput](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#PredictionInput) for more information. | Yes | Dict | | None | | job_id_prefix | The prefix of the generated job id. | Yes | String | | None | | wait_interval | The number of seconds to wait in case the operation has a long run time. | Yes | | | 30 | ## Input data schema The component accepts the following as input: * A trained model: It can be a model file in Cloud Storage, a deployed model, or a version in Cloud ML Engine. Specify the path to the model in the `model_path `runtime argument. * Input data: The data used to make predictions against the trained model. The data can be in [multiple formats](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat). The data path is specified by `input_paths` and the format is specified by `input_data_format`. ## Output Name | Description | Type :--- | :---------- | :--- job_id | The ID of the created batch job. | String output_path | The output path of the batch prediction job | GCSPath ## Cautions & requirements To use the component, you must: * Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup). * The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details. * Grant the following types of access to the Kubeflow user service account: * Read access to the Cloud Storage buckets which contains the input data. * Write access to the Cloud Storage bucket of the output directory. ## Detailed description Follow these steps to use the component in a pipeline: 1. Install the Kubeflow Pipeline SDK: ``` %%capture --no-stderr !pip3 install kfp --upgrade ``` 2. 
Load the component using KFP SDK ``` import kfp.components as comp mlengine_batch_predict_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/1.5.0-rc.0/components/gcp/ml_engine/batch_predict/component.yaml') help(mlengine_batch_predict_op) ``` ### Sample Code Note: The following sample code works in an IPython notebook or directly in Python code. In this sample, you batch predict against a pre-built trained model from `gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/` and use the test data from `gs://ml-pipeline-playground/samples/ml_engine/census/test.json`. #### Inspect the test data ``` !gsutil cat gs://ml-pipeline-playground/samples/ml_engine/census/test.json ``` #### Set sample parameters ``` # Required Parameters PROJECT_ID = '<Please put your project ID here>' GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash # Optional Parameters EXPERIMENT_NAME = 'CLOUDML - Batch Predict' OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/batch_predict/output/' ``` #### Example pipeline that uses the component ``` import kfp.dsl as dsl import json @dsl.pipeline( name='CloudML batch predict pipeline', description='CloudML batch predict pipeline' ) def pipeline( project_id = PROJECT_ID, model_path = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/', input_paths = '["gs://ml-pipeline-playground/samples/ml_engine/census/test.json"]', input_data_format = 'JSON', output_path = OUTPUT_GCS_PATH, region = 'us-central1', output_data_format='', prediction_input = json.dumps({ 'runtimeVersion': '1.10' }), job_id_prefix='', wait_interval='30'): mlengine_batch_predict_op( project_id=project_id, model_path=model_path, input_paths=input_paths, input_data_format=input_data_format, output_path=output_path, region=region, output_data_format=output_data_format, prediction_input=prediction_input, job_id_prefix=job_id_prefix, wait_interval=wait_interval) ``` #### Compile the pipeline ``` pipeline_func = 
pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) ``` #### Submit the pipeline for execution ``` #Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) ``` #### Inspect prediction results ``` OUTPUT_FILES_PATTERN = OUTPUT_GCS_PATH + '*' !gsutil cat $OUTPUT_FILES_PATTERN ``` ## References * [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py) * [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile) * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/ml_engine/batch_predict/sample.ipynb) * [Cloud Machine Learning Engine job REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs) ## License By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
github_jupyter
``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import shutil from skimage.external.tifffile import imsave,imread myDir=r'/project/hackathon/hackers03/shared/patches/raw_20X' fileList=os.listdir(myDir) fileList newDir=r'/project/hackathon/hackers03/shared/single_tiff' try: os.mkdir(newDir) except: pass i=0 myDataFrame=pd.DataFrame(columns=['file','patch','phase_path','ch1','ch2']) for myFile in fileList: temp=np.load(os.path.join(myDir,myFile)) for myCell in range(temp.shape[0]): # save phase image phase_im = temp[myCell,0,:,:] phase_im = (phase_im-np.min(phase_im))/(np.max(phase_im)-np.min(phase_im))*255 phase_path=os.path.join(newDir,f'cell_{str(i).zfill(6)}_phase.tif') imsave(phase_path,phase_im.astype('uint8')) # measure fluorescent signal ch_fucci1=np.mean(temp[myCell,1,120:130,120:130]) ch_fucci2=np.mean(temp[myCell,2,120:130,120:130]) # save data in a data frame myDataFrame.loc[i,'file']=myFile myDataFrame.loc[i,'patch']=myCell myDataFrame.loc[i,'phase_path']=phase_path myDataFrame.loc[i,'ch1']=ch_fucci1 myDataFrame.loc[i,'ch2']=ch_fucci2 i=i+1 len(myDataFrame) myDataFrame.head() test=imread(myDataFrame.loc[1100,'phase_path']) %matplotlib notebook plt.imshow(test,cmap='gray') ``` ### Look at the distribution of fluorescent signals ``` %matplotlib notebook fig,ax=plt.subplots(1,figsize=(10,8)) plt.scatter(myDataFrame.loc[:,'ch1'],myDataFrame.loc[:,'ch2']) %matplotlib notebook sns.jointplot(x='ch1', y='ch2', data=myDataFrame, kind="kde"); ``` ### Assign labels ``` myDataFrame['ratio']=myDataFrame.ch2/myDataFrame.ch1 %matplotlib notebook myDataFrame['ratio'].hist(bins=100) arbPerc=np.percentile(myDataFrame['ratio'],50) arbPerc myDataFrame['label']='early' myDataFrame.loc[myDataFrame.ratio>arbPerc,'label']='late' myDataFrame.head() myDataFrame.to_csv('/project/hackathon/hackers03/shared/raw20_df.csv') shutil.make_archive('/project/hackathon/hackers03/shared/single.zip', 'zip', 
'/project/hackathon/hackers03/shared/single_tiff') def convert_bytes(num): """ this function will convert bytes to MB.... GB... etc """ for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return "%3.1f %s" % (num, x) num /= 1024.0 def file_size(file_path): """ this function will return the file size """ if os.path.isfile(file_path): file_info = os.stat(file_path) return convert_bytes(file_info.st_size) file_size('/project/hackathon/hackers03/shared/single.zip') ```
github_jupyter
# Desafio 4 Neste desafio, vamos praticar um pouco sobre testes de hipóteses. Utilizaremos o _data set_ [2016 Olympics in Rio de Janeiro](https://www.kaggle.com/rio2016/olympic-games/), que contém dados sobre os atletas das Olimpíadas de 2016 no Rio de Janeiro. Esse _data set_ conta com informações gerais sobre 11538 atletas como nome, nacionalidade, altura, peso e esporte praticado. Estaremos especialmente interessados nas variáveis numéricas altura (`height`) e peso (`weight`). As análises feitas aqui são parte de uma Análise Exploratória de Dados (EDA). > Obs.: Por favor, não modifique o nome das funções de resposta. ## _Setup_ geral ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as sct import seaborn as sns import statsmodels.api as sm #%matplotlib inline from IPython.core.pylabtools import figsize figsize(12, 8) sns.set() athletes = pd.read_csv("athletes.csv") athletes.info() athletes.head() athletes[['height','weight']].describe() athletes[['height','weight']].hist() def get_sample(df, col_name, n=100, seed=42): """Get a sample from a column of a dataframe. It drops any numpy.nan entries before sampling. The sampling is performed without replacement. Example of numpydoc for those who haven't seen yet. Parameters ---------- df : pandas.DataFrame Source dataframe. col_name : str Name of the column to be sampled. n : int Sample size. Default is 100. seed : int Random seed. Default is 42. Returns ------- pandas.Series Sample of size n from dataframe's column. """ np.random.seed(seed) random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False) #retorna uma array com index das colunas return df.loc[random_idx, col_name] #retorna uma series com index e valor da coluna ``` ## Inicia sua análise a partir daqui ``` # Sua análise começa aqui. 
``` ## Questão 1 Considerando uma amostra de tamanho 3000 da coluna `height` obtida com a função `get_sample()`, execute o teste de normalidade de Shapiro-Wilk com a função `scipy.stats.shapiro()`. Podemos afirmar que as alturas são normalmente distribuídas com base nesse teste (ao nível de significância de 5%)? Responda com um boolean (`True` ou `False`). ``` def q1(): amostra_q1 = get_sample(athletes,'height', n=3000, seed=42) stat, p = sct.shapiro(amostra_q1) print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q1() ``` __Para refletir__: * Plote o histograma dessa variável (com, por exemplo, `bins=25`). A forma do gráfico e o resultado do teste são condizentes? Por que? * Plote o qq-plot para essa variável e a analise. * Existe algum nível de significância razoável que nos dê outro resultado no teste? (Não faça isso na prática. Isso é chamado _p-value hacking_, e não é legal). ``` amostra_q1 = get_sample(athletes,'height', n=3000, seed=42) sns.distplot(amostra_q1, bins=25, hist_kws={"density": True}) plt.show () sm.qqplot(amostra_q1, fit=True, line="45") plt.show () amostra_q1 = get_sample(athletes,'height', n=3000, seed=42) stat, p = sct.shapiro(amostra_q1) p > 0.0000001 ``` ## Questão 2 Repita o mesmo procedimento acima, mas agora utilizando o teste de normalidade de Jarque-Bera através da função `scipy.stats.jarque_bera()`. Agora podemos afirmar que as alturas são normalmente distribuídas (ao nível de significância de 5%)? Responda com um boolean (`True` ou `False`). ``` def q2(): amostra_q2 = get_sample(athletes,'height', n=3000, seed=42) stat, p = sct.jarque_bera(amostra_q2) print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q2() ``` __Para refletir__: * Esse resultado faz sentido? ``` amostra_q2 = get_sample(athletes,'height', n=3000, seed=42) sm.qqplot(amostra_q2, fit=True, line="45") plt.show () ``` ## Questão 3 Considerando agora uma amostra de tamanho 3000 da coluna `weight` obtida com a função `get_sample()`. 
Faça o teste de normalidade de D'Agostino-Pearson utilizando a função `scipy.stats.normaltest()`. Podemos afirmar que os pesos vêm de uma distribuição normal ao nível de significância de 5%? Responda com um boolean (`True` ou `False`). ``` def q3(): amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42) stat, p = sct.normaltest(amostra_q3) print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q3() ``` __Para refletir__: * Plote o histograma dessa variável (com, por exemplo, `bins=25`). A forma do gráfico e o resultado do teste são condizentes? Por que? * Um _box plot_ também poderia ajudar a entender a resposta. ``` amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42) sns.distplot(amostra_q3, bins=25, hist_kws={"density": True}) plt.show () sns.boxplot(data = amostra_q3) ``` ## Questão 4 Realize uma transformação logarítmica em na amostra de `weight` da questão 3 e repita o mesmo procedimento. Podemos afirmar a normalidade da variável transformada ao nível de significância de 5%? Responda com um boolean (`True` ou `False`). ``` def q4(): amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42) amostra_q4_transformada = np.log(amostra_q4) stat, p = sct.normaltest(amostra_q4_transformada) print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q4() ``` __Para refletir__: * Plote o histograma dessa variável (com, por exemplo, `bins=25`). A forma do gráfico e o resultado do teste são condizentes? Por que? * Você esperava um resultado diferente agora? ``` amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42) amostra_q4_transformada = np.log(amostra_q4) sns.distplot(amostra_q4_transformada, bins=25, hist_kws={"density": True}) plt.show () sns.boxplot(data = amostra_q4_transformada) ``` > __Para as questão 5 6 e 7 a seguir considere todos testes efetuados ao nível de significância de 5%__. ## Questão 5 Obtenha todos atletas brasileiros, norte-americanos e canadenses em `DataFrame`s chamados `bra`, `usa` e `can`,respectivamente. 
Realize um teste de hipóteses para comparação das médias das alturas (`height`) para amostras independentes e variâncias diferentes com a função `scipy.stats.ttest_ind()` entre `bra` e `usa`. Podemos afirmar que as médias são estatisticamente iguais? Responda com um boolean (`True` ou `False`). ``` athletes.columns athletes[(athletes.nationality == 'BRA') | (athletes.nationality == 'USA') | (athletes.nationality == 'CAN')] bra = athletes[athletes.nationality == 'BRA'] usa = athletes[athletes.nationality == 'USA'] can = athletes[athletes.nationality == 'CAN'] bra['height'].describe() bra.isna().sum() usa['height'].describe() usa.isna().sum() can['height'].describe() can.isna().sum() def q5(): stat, p = sct.ttest_ind(bra['height'], usa['height'], equal_var = False, nan_policy = 'omit') #False: se falso, execute o teste t de Welch, que não assume igual variação populaciona print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q5() sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA') sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA') ``` ## Questão 6 Repita o procedimento da questão 5, mas agora entre as alturas de `bra` e `can`. Podemos afimar agora que as médias são estatisticamente iguais? Reponda com um boolean (`True` ou `False`). ``` def q6(): stat, p = sct.ttest_ind(bra['height'], can['height'], equal_var = False, nan_policy = 'omit') #False: se falso, execute o teste t de Welch, que não assume igual variação populaciona print('stat= {}, p={}'.format(stat,p)) return bool(p> 0.05) q6() sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA') sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN') ``` ## Questão 7 Repita o procedimento da questão 6, mas agora entre as alturas de `usa` e `can`. Qual o valor do p-valor retornado? Responda como um único escalar arredondado para oito casas decimais. 
``` def q7(): stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = False, nan_policy = 'omit') #False: se falso, execute o teste t de Welch, que não assume igual variação populaciona print('stat= {}, p={}'.format(stat,p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') return float(np.round(p, 8)) q7() ``` __Para refletir__: * O resultado faz sentido? * Você consegue interpretar esse p-valor? * Você consegue chegar a esse valor de p-valor a partir da variável de estatística? ``` stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = True, nan_policy = 'omit') print('stat= {}, p={}'.format(stat,p)) #grau de liberdade para o teste t independente com variancias semelhantes: df = n1 + n2 - 2 gl = len(usa) + len(can) - 2 print(f"Graus de liberdade: {gl}") q7_sf = sct.t.sf(stat, gl)*2 #Para Hipótese Bicaudal print(q7_sf) sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA') sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN') ```
github_jupyter
# 4. Theory and Mathematical Details Interaction Primitives [1] are basic building blocks that model the movements between multiple agents in an interaction. This section will give a brief literature overview of the origins of Interaction Primitives before delving into a mathematical treatment of the three supported Interaction Primitive types, the: Interaction Probabilistic Movement Primitive, Bayesian Interaction Primitive, and ensemble Bayesian Interaction Primitive. ## Background Inspired by Dynamical Movement Primitives [2], which were themselves inspired by the biological concept of Motor Primitives [3], Interaction Primitives probabilistically model the relationship multiple degrees of freedom distributed across two or more interacting agents. In the original formulation, Interaction Primitives were presented as a solution for exactly two agents, however, without loss of generality this formulation also applies to more than two agents (and in fact only one agent if some of the degrees of freedom are unobserved). In both Dynamical Movement Primitives and Interaction Primitives, the trajectory of each degree of freedom is modeled as a nonlinear dynamical system. The parameters of the system -- specifically, the coefficients of the forcing function -- are determined from a set of demonstrations. In a Dynamic Movement Primitive, the system starts at some initial position and is pulled toward a specified goal position based on the attractor dynamics of the system. In this case, all degrees of freedom are observed at all times. Interaction Primitives extends this methodology by assuming some degrees of freedom are unknown (those associated with a controlled agent) and there is no known goal position. The system parameters associated with the unobserved degrees of freedom are estimated from a partial trajectory of the observed degrees of freedom (those associated with an observed agent). 
While this is a powerful formulation, it falls short in one respect: it is non-trivial to combine multiple primitives together as building blocks in a more complex movement or interaction. While several solutions have been proposed to address this weakness, one of the most promising is the Probabilistic Movement Primitive [4]. In this formulation, the dynamical system has been eliminated entirely and replaced with a state space model composed of the parameterized degrees of freedom. Each degree of freedom is related to each other with a full joint probability distribution, allowing a well-defined probabilistic interpretation and the ability to easily combine and blend multiple primitives together, at the cost of the stability and convergence guarantees provided by the dynamical system. Such a formulation is easily extended to the interaction case (referred to as Interaction Probabilistic Movement Primitives) in which one or more degrees of freedom are unobserved [5]. One of the challenges that Interaction Primitives must deal with is the integration of a partial trajectory for the estimation of the unknown model parameters. In both Dynamic Movement Primitives and Probabilistic Movement Primitives, the parameterization of each degree of freedom is time-dependent. As such, the length of the partial trajectory must be estimated relative to the demonstration trajectories. In the standard formulation of Interaction Primitives, this estimation occurs as a separate sequence alignment step that is performed prior to parameter estimation. However, as this is a separate step, none of the uncertainty information associated with the model parameters is utilized. These are necessarily correlated since both the timing and shape of a trajectory are inherent properties of an interaction movement. 
Bayesian Interaction Primitives [6] were introduced as a solution, in which the length of the partial trajectory is simultaneously estimated with the model parameters in a fully probabilistic manner. | Method | Model Type | Inference Type | Analysis Type | | --- | --- | --- | --- | | Interaction Primitives | Dynamical System | Spatial | Exact | | Interaction Probabilistic Movement Primitive | Probabilistic | Spatial | Exact | | Bayesian Interaction Primitives | Probabilistic | Spatiotemporal | Exact | | Ensemble Bayesian Interaction Primitives | Probabilistic | Spatiotemporal | Approximate | The above overview is high-level in nature as it is only intended to provide a background on the origins of Interaction Primitives. Of the listed algorithms, Interaction Probabilistic Movement Primitives, Bayesian Interaction Primitives, and ensemble Bayesian Interaction Primitives are supported by this library and so they will receive in an in-depth treatment in the following sections. ## Interaction Probabilistic Movement Primitive We define an interaction $\boldsymbol{Y}$ as a time series of $D$-dimensional sensor observations over time, $\boldsymbol{Y}_{1:T} = [\boldsymbol{y}_1, \dots, \boldsymbol{y}_T] \in \mathbb{R}^{D \times T}$. Of the $D$ dimensions, $D_o$ of them represent \emph{observed} DoFs from one agent (the human) and $D_c$ of them represent the \emph{controlled} DoFs from the other agent (the robot), such that $D = D_c + D_o$. ### Basis Function Decomposition Rather than working directly with the time series, the interaction $\boldsymbol{Y}$ is represented with a state space model. With a standard parameterization where the number of state variables is equivalent to the number of degrees of freedom, the shape of the trajectory is captured through non-trivial state transition dynamics. 
This is undesirable, as the trajectory shape needs to be modifiable online based on observations, which is difficult to accomplish if the shape is modeled solely by the transition dynamics. Instead, we transform the interaction $\boldsymbol{Y}$ into a latent space via basis function decomposition such that the trajectory shape is modeled as part of the state itself. With such a model, the shape of the trajectory -- including the goal the point -- can be adjusted by simply updating the state estimate. Each dimension $d \in D$ of $\boldsymbol{Y}$ is approximated with a weighted linear combination of time-dependent nonlinear basis functions, such that $y_t^d = h^d(\phi(t), \boldsymbol{w}^d) = \Phi_{\phi(t)}^{\intercal} \boldsymbol{w}^d + \epsilon_y$, where $\Phi_{\phi(t)} \in \mathbb{R}^{1\times B}$ is a row vector of $B^d$ basis functions, $\boldsymbol{w}^d \in \mathbb{R}^{B \times 1}$, and $\epsilon_y$ is i.i.d. Gaussian noise. As this is a linear system with a closed-form solution, the weights $\boldsymbol{w}^d$ can be found through simple linear regression, i.e., least squares. The full latent model is composed of the aggregated weights from each dimension, $\boldsymbol{w} = [\boldsymbol{w}^{1\intercal}, \dots, \boldsymbol{w}^{D\intercal}] \in \mathbb{R}^{1 \times B}$ where $B = \sum_{d}^{D} B^d$ and $\boldsymbol{y}_t = h(\phi(t), \boldsymbol{w})$. We note that the time-dependence of the basis functions -- and the nonlinear function $h(\cdot)$ -- is not on the absolute time $t$, but rather on a relative phase value $\phi(t)$. Consider the basis function decompositions for a motion performed at slow speeds and fast speeds with a fixed measurement rate. If the time-dependence is based on the absolute time $t$, then the decompositions will be different despite the motion being spatially identical. Thus, we substitute the absolute time $t$ with a linearly interpolated relative phase value, $\phi(t)$, such that $\phi(0) = 0$ and $\phi(T) = 1$. 
For notational simplicity, from here on we refer to $\phi(t)$ as simply $\phi$. ### Temporal Estimation via Sequence Alignment Given $t$ observations of an interaction, $\boldsymbol{Y}_{1:t}$, the goal of Interaction Primitives is to infer the underlying latent model $\boldsymbol{w}$ while taking into account a prior model $\boldsymbol{w}_0$. We assume that the $t$ observations made so far are of a partial interaction, i.e., $\phi(t) < 1$, and that $T$ is unknown. Formally, we define the solution to this problem as the following posterior distribution: (1) $$ \begin{equation*} p(\boldsymbol{w}_t | \boldsymbol{Y}_{1:t}, \boldsymbol{w}_{0}) \propto p(\boldsymbol{y}_{t} | \boldsymbol{w}_t) p(\boldsymbol{w}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{w}_{0}). \end{equation*} $$ As the latent state space utilizes phase-dependent basis functions, we must estimate $\phi(t)$ before we can update our state estimate with the partial observations. In Interaction Probabilistic Movement Primitives, this is accomplished with the application of a sequence alignment algorithm -- Dynamic Time Warping -- against the demonstration trajectories. Given the mean demonstration trajectory $\boldsymbol{\bar{Y}}$ of length $M$ and the partially observed trajectory $\boldsymbol{Y}$ of length $T$, Dynamic Time Warping computes the optimal alignment which minimizes a distance function $c(\cdot)$ (typically Euclidean) as follows. $$ \begin{align*} D(1,t) &= \sum_{k=1}^{t} c(\boldsymbol{\bar{y}}_1, \boldsymbol{y}_t), m \in [1 : T], \\ D(m,1) &= \sum_{k=1}^{m} c(\boldsymbol{\bar{y}}_k, \boldsymbol{y}_1), t \in [1 : M], \\ D(m,t) &= \text{min}[D(m-1,t-1), D(m,t-1), D(m-1,t)] + c(\boldsymbol{\bar{y}}_m, \boldsymbol{y}_t) \end{align*} $$ This recursion can be terminated upon finding the distance to the pair $(m*,T)$ where $m* = \text{argmin}_{m} D(m,T)$. The estimated phase is then given by the ratio $\phi = \frac{m*}{M}$. ### Exact Spatial Inference The posterior density in Eq. 
1 is computed with a recursive linear state space filter, i.e., a standard Kalman filter. Such filters are composed of two steps performed recursively: state prediction in which the state is propagated forward in time according to the system dynamics $p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0})$, and measurement update in which the latest sensor observation is incorporated in the predicted state $p(\boldsymbol{y}_{t} | \boldsymbol{s}_t)$. Applying Markov assumptions, the state prediction density can be defined as: $$ \begin{align*} & p(\boldsymbol{w}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{w}_{0}) \nonumber \\ & = \int p(\boldsymbol{w}_t | \boldsymbol{w}_{t-1}) p(\boldsymbol{w}_{t-1} | \boldsymbol{Y}_{1:t-1}, \boldsymbol{w}_{0})d\boldsymbol{w}_{t-1}. \end{align*} $$ As with all Kalman filters, we assume that all error estimates produced during recursion are normally distributed, i.e., $p(\boldsymbol{w}_t | \boldsymbol{Y}_{1:t}, \boldsymbol{w}_{0}) = \mathcal{N}(\boldsymbol{\mu}_{t|t}, \boldsymbol{\Sigma}_{t|t})$ and $p(\boldsymbol{w}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{w}_{0}) = \mathcal{N}(\boldsymbol{\mu}_{t|t-1}, \boldsymbol{\Sigma}_{t|t-1})$. The state itself is time-invariant, therefore the state transition matrix $\boldsymbol{G}$ is simply the identity $$ \begin{align*} \boldsymbol{\mu}_{t|t-1} &= {\underbrace{ \begin{bmatrix} 1 & \dots & 0\\ \vdots & \ddots & \vdots\\ 0 & \dots & 1 \end{bmatrix} }_\text{$\boldsymbol{G}$}} \boldsymbol{\mu}_{t-1|t-1}, \\ \boldsymbol{\Sigma}_{t|t-1} &= \boldsymbol{G} \boldsymbol{\Sigma}_{t-1|t-1} \boldsymbol{G}^{\intercal} + \underbrace{\begin{bmatrix} 1 & \dots & 0\\ \vdots & \ddots & \vdots\\ 0 & \dots & 1 \end{bmatrix}}_\text{$\boldsymbol{Q}_t$}, \end{align*} $$ and so is $\boldsymbol{Q}$, the process noise associated with the state transition update. 
The observation function $h(\cdot)$ is linear with respect to the state variable $\boldsymbol{w}$ which yields a straightforward observation matrix $\boldsymbol{H}$: $$ \begin{align*} \begin{split} \boldsymbol{H}_t &= \frac{\partial h(\boldsymbol{w}_t)}{\partial h_t}\\ &= \begin{bmatrix} \Phi_{\phi} & \dots & 0\\ \vdots & \ddots & \vdots\\ 0 & \dots & \Phi_{\phi} \end{bmatrix}. \end{split} \end{align*} $$ We can integrate the measurement by calculating the innovation covariance as well as the Kalman gain, which dictates how heavily the observation should be weighted: $$ \begin{align*} \boldsymbol{S}_t &= \boldsymbol{H}_t \boldsymbol{\Sigma}_{t|t-1} \boldsymbol{H}_t^{\intercal} + \boldsymbol{R}_t,\\ \boldsymbol{K}_t &= \boldsymbol{\Sigma}_{t|t-1} \boldsymbol{H}_t^{\intercal} \boldsymbol{S}_t^{-1}. \end{align*} $$ This enables the calculation of the parameters for the posterior distribution, $$ \begin{align*} \boldsymbol{\mu}_{t|t} &= \boldsymbol{\mu}_{t|t-1} + \boldsymbol{K}_t(\boldsymbol{y}_t - h(\boldsymbol{\mu}_{t|t-1})),\\ \boldsymbol{\Sigma}_{t|t} &= (I - \boldsymbol{K}_t \boldsymbol{H}_t)\boldsymbol{\Sigma}_{t|t-1}, \end{align*} $$ where $\boldsymbol{R}_t$ is the Gaussian measurement noise associated with the sensor observation $\boldsymbol{y}_t$. The prior model $\boldsymbol{w}_0$ is computed from a set of initial demonstrations. That is, given the latent models for $N$ demonstrations, $\boldsymbol{W} = [\boldsymbol{w}_1^{\intercal}, \dots, \boldsymbol{w}_N^{\intercal}]$, we define $\boldsymbol{w}_0$ as simply the arithmetic mean of each DoF: $$ \begin{equation*} \boldsymbol{w}_0 = \left[\frac{1}{N}\sum_{i=1}^{N}\boldsymbol{w}^1_i, \dots, \frac{1}{N}\sum_{i=1}^{N}\boldsymbol{w}^D_i\right]. \end{equation*} $$ where $T_i$ is the length of the $i$-th demonstration. 
The prior density is defined as $p(\boldsymbol{w}_0) = \mathcal{N}(\boldsymbol{\mu}_0, \boldsymbol{\Sigma}_0)$ where $$ \begin{align*} \boldsymbol{\mu}_0 &= \boldsymbol{w}_0,\\ \boldsymbol{\Sigma}_0 &= \boldsymbol{\Sigma}_{\boldsymbol{W}, \boldsymbol{W}} \end{align*} $$ and $\boldsymbol{\Sigma}_{\boldsymbol{W}, \boldsymbol{W}}$ is the sample covariance of the basis weights. The baseline measurement noise $\boldsymbol{R}$ is calculated from the set of initial demonstrations with the following closed-form solution: $$ \begin{align*} \boldsymbol{R} = \frac{1}{N} \sum_{i}^{N} \frac{1}{T_i} \sum_{t}^{T_i} \left( \boldsymbol{y}_{t} - h(\phi(t), \boldsymbol{w}_i) \right)^{2}. \end{align*} $$ This value is equivalent to the mean squared error of the regression fit for our basis functions over every demonstration. Intuitively, this represents the variance of the data around the regression and captures both the approximation error and the sensor noise associated with the observations. ## Bayesian Interaction Primitives Bayesian Interaction Primitives are based upon the observation that the temporal and spatial estimation problems are inherently correlated, and that we are throwing information away by solving only one problem at a time in isolation. In other words, if we mis-estimate where we are in the interaction in a temporal sense, we will mis-estimate where we are in a spatial sense as well. This is based upon insights from the simultaneous localization and mapping (SLAM) field, where spatial errors in the robot's state estimate result in correlated errors in each map landmark. Intuitively, this can be understood by imagining that we are wearing a blindfold and walking around a room filled with a set of landmarks, such as walls and tables and chairs. Furthermore, we are allowed to periodically remove our blindfold and make an observation. 
As we move around while blindfolded, we maintain an estimate of where we think we are (our localized state) relative to each landmark in the room (the map). Suppose we remove our blindfold after moving and find that a landmark is not where we expected it to be. If this is the only landmark we know of in the room, then it is difficult to determine the source of the error: is it due to a bad initial observation of the landmark, i.e., a noisy measurement? Or is it due to us being wrong about where we think we are with respect to the landmark, i.e., our state estimate? Now suppose there are five landmarks in the room. If four of the landmarks are exactly where we thought they would be, but the fifth one isn't, then the source of the error is much more likely to be a noisy observation of the fifth landmark rather than an error in our state (as that would require noisy observations of the other four landmarks to coincidentally be correct). But what if all five landmarks are in different positions than we expected? If the source of the error is our state estimate, then we would expect there to be some sort of relationship between the errors of the landmarks. Thus we can succinctly describe this dual optimization problem as follows: an error in the state estimate induces a correlated error in the landmark estimates. This same scenario holds when considering interactions, only in this case our state estimate is our temporal location, the phase $\phi$, and the landmarks are the weights of our decomposed DoF trajectories, $\boldsymbol{w}$. The above explanation was rather simple for illustrative purposes, as there are many other factors to consider, for example how accurate we estimate our observations to inherently be, how accurate we expect our state estimate to be over time, etc. Complicating this even more is the fact that our "map" is not static! People can often be unpredictable and they may change their trajectories at any point in an interaction. 
So even if our estimate of the weights of the interaction (our "map") is correct at one point in time, this does not guarantee that it will be in the future. In the SLAM example, this would be as if the tables and chairs would move about on their own while we are blindfolded; or if the walls take a page out of `Inception` and start changing their shape. ### Exact Spatiotemporal Inference We formulate this spatiotemporal inference problem much the same as before, except our state vector must be expanded to account for the temporal terms. Probabilistically, we represent this insight with the augmented state vector $\boldsymbol{s} = [\phi, \dot{\phi}, \boldsymbol{w}]$ and the following definition: $$ \begin{equation*} p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t}, \boldsymbol{s}_{0}) \propto p(\boldsymbol{y}_{t} | \boldsymbol{s}_t) p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0}). \end{equation*} $$ It is important to note that while the weights themselves are time-invariant with respect to an interaction, our estimate of the weights \emph{is} time-varying. That is, every time we integrate a new sensor observation, our estimate of the underlying latent model is updated. As before, we compute the posterior with a two-step recursive filter: state prediction in which the state is propagated forward in time according to the system dynamics $p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0})$, and measurement update in which the latest sensor observation is incorporated in the predicted state $p(\boldsymbol{y}_{t} | \boldsymbol{s}_t)$. Applying Markov assumptions, the state prediction density can be defined as: $$ \begin{align*} & p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0}) \nonumber \\ & = \int p(\boldsymbol{s}_t | \boldsymbol{s}_{t-1}) p(\boldsymbol{s}_{t-1} | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0})d\boldsymbol{s}_{t-1}. 
\end{align*} $$ Again, we assume that all error estimates produced during recursion are normally distributed, i.e., $p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t}, \boldsymbol{s}_{0}) = \mathcal{N}(\boldsymbol{\mu}_{t|t}, \boldsymbol{\Sigma}_{t|t})$ and $p(\boldsymbol{s}_t | \boldsymbol{Y}_{1:t-1}, \boldsymbol{s}_{0}) = \mathcal{N}(\boldsymbol{\mu}_{t|t-1}, \boldsymbol{\Sigma}_{t|t-1})$. Unlike before, however, the temporal terms of our state do evolve over time. For simplicity, we assume that the state evolves according to a linear constant velocity model: (2) $$ \begin{align*} \boldsymbol{\mu}_{t|t-1} &= {\underbrace{ \begin{bmatrix} 1 & \Delta t & \dots & 0\\ 0 & 1 & \dots & 0\\ \vdots & \vdots & \ddots & \vdots\\ 0 & 0 & \dots & 1 \end{bmatrix} }_\text{$\boldsymbol{G}$}} \boldsymbol{\mu}_{t-1|t-1}, \\ \boldsymbol{\Sigma}_{t|t-1} &= \boldsymbol{G} \boldsymbol{\Sigma}_{t-1|t-1} \boldsymbol{G}^{\intercal} + \underbrace{\begin{bmatrix} \boldsymbol{\Sigma}_{\phi, \dot{\phi}} & \dots & 0\\ \vdots & \ddots & \vdots\\ 0 & \dots & 1 \end{bmatrix}}_\text{$\boldsymbol{Q}_t$} \end{align*} $$ where $\boldsymbol{Q}$ is the process noise associated with the state transition update. The noise correlations between phase and phase velocity, $\boldsymbol{\Sigma}_{\phi, \dot{\phi}}$, are determined by a piecewise or continuous first-order white noise model, e.g., $$ \begin{align*} \boldsymbol{\Sigma}_{\phi, \dot{\phi}} = \begin{bmatrix} \frac{\Delta t^4}{4} & \frac{\Delta t^3}{3}\\ \frac{\Delta t^3}{3} & \Delta t^2 \end{bmatrix} \sigma^2_\phi. 
\nonumber \end{align*} $$ Unlike in Interaction ProMP, the observation function $h(\cdot)$ is now nonlinear with respect to the state variable $\phi$ and must be linearized via Taylor expansion: (3) $$ \begin{align*} \begin{split} \boldsymbol{H}_t &= \frac{\partial h(\boldsymbol{s}_t)}{\partial s_t}\\ &= \begin{bmatrix} \frac{\partial \Phi_{\phi}^{\intercal} \boldsymbol{w}^1}{\partial \phi} & 0 & \Phi_{\phi} & \dots & 0\\ \vdots & \vdots & \vdots & \ddots & \vdots\\ \frac{\partial \Phi_{\phi}^{\intercal} \boldsymbol{w}^{D}}{\partial \phi} & 0 & 0 & \dots & \Phi_{\phi} \end{bmatrix}. \end{split} \end{align*} $$ Note that because the augmented state now includes the phase $\phi$, the observation function $h(\boldsymbol{s})$ is simply a function of $\boldsymbol{s}$ in order to reduce notational clutter. We can now integrate the measurement by calculating the innovation covariance as well as the Kalman gain, which dictates how heavily the observation should be weighted: $$ \begin{align*} \boldsymbol{S}_t &= \boldsymbol{H}_t \boldsymbol{\Sigma}_{t|t-1} \boldsymbol{H}_t^{\intercal} + \boldsymbol{R}_t,\\ \boldsymbol{K}_t &= \boldsymbol{\Sigma}_{t|t-1} \boldsymbol{H}_t^{\intercal} \boldsymbol{S}_t^{-1}. \end{align*} $$ This enables the calculation of the parameters for the posterior distribution, (4) $$ \begin{align*} \boldsymbol{\mu}_{t|t} &= \boldsymbol{\mu}_{t|t-1} + \boldsymbol{K}_t(\boldsymbol{y}_t - h(\boldsymbol{\mu}_{t|t-1})),\\ \boldsymbol{\Sigma}_{t|t} &= (I - \boldsymbol{K}_t \boldsymbol{H}_t)\boldsymbol{\Sigma}_{t|t-1}, \end{align*} $$ where $\boldsymbol{R}_t$ is the Gaussian measurement noise associated with the sensor observation $\boldsymbol{y}_t$. The prior model $\boldsymbol{s}_0 = [\phi_0, \dot{\phi}_0, \boldsymbol{w}_0]$ is computed from a set of initial demonstrations. 
That is, given the latent models for $N$ demonstrations, $\boldsymbol{W} = [\boldsymbol{w}_1^{\intercal}, \dots, \boldsymbol{w}_N^{\intercal}]$, we define $\boldsymbol{w}_0$ as simply the arithmetic mean of each DoF: $$ \begin{equation*} \boldsymbol{w}_0 = \left[\frac{1}{N}\sum_{i=1}^{N}\boldsymbol{w}^1_i, \dots, \frac{1}{N}\sum_{i=1}^{N}\boldsymbol{w}^D_i\right]. \end{equation*} $$ The initial phase $\phi_0$ is set to $0$ under the assumption that all interactions start from the beginning. The initial phase velocity $\dot{\phi}_0$ is the arithmetic mean of the phase velocity of each demonstration: $$ \begin{equation*} \dot{\phi}_0 = \frac{1}{N} \sum_{i=1}^N \frac{1}{T_i}, \end{equation*} $$ where $T_i$ is the length of the $i$-th demonstration. The prior density is defined as $p(\boldsymbol{s}_0) = \mathcal{N}(\boldsymbol{\mu}_0, \boldsymbol{\Sigma}_0)$ where (5) $$ \begin{align*} \boldsymbol{\mu}_0 &= \boldsymbol{s}_0, \\ \boldsymbol{\Sigma}_0 &= \begin{bmatrix} \boldsymbol{\Sigma}_{\phi, \phi} & 0 & 0\\ 0 & \boldsymbol{\Sigma}_{\dot{\phi}, \dot{\phi}} & 0\\ 0 & 0 & \boldsymbol{\Sigma}_{\boldsymbol{W}, \boldsymbol{W}} \end{bmatrix}, \end{align*} $$ and $\boldsymbol{\Sigma}_{\phi, \phi}$ is the sample variance of the phases of the demonstrations, $\boldsymbol{\Sigma}_{\dot{\phi}, \dot{\phi}}$ is the sample variance of the phase velocities, and $\boldsymbol{\Sigma}_{\boldsymbol{W}, \boldsymbol{W}}$ is the sample covariance of the basis weights. The measurement noise $\boldsymbol{R}$ is calculated in the same manner as before. 
## Ensemble Bayesian Interaction Primitives While Bayesian Interaction Primitives is an analytical method to compute the exact solution to the simultaneous temporal and spatial estimation problem, it suffers from several drawbacks in practice: 1) the prior distribution is assumed to be Gaussian, which is unlikely to be the case; 2) the first-order Taylor approximation introduces (potentially significant) linearization errors; and 3) the integration of the covariance matrix is computationally prohibitive for large state dimensions. Therefore, in addition to the exact solution yielded by Bayesian Interaction Primitives, we also present a Monte Carlo-based method known as ensemble Bayesian Interaction Primitives (eBIP). Originally motivated as a solution to multimodal applications which amplify the above problems [7], this method also yields improvements in inference accuracy and computational performance in the general case. ### Approximate Spatiotemporal Inference The extended Kalman filter employed for recursive filtering in BIP relies on the assumption that uncertainty in the state prediction is approximately Gaussian. When this is not the case, the estimated state can diverge rapidly from the true state. One potential source of non-normality in the uncertainty is the nonlinear state transition or observation function in the dynamical system. The original formulation of BIP addresses this challenge by linearizing these functions about the estimated state via first-order Taylor approximation, which is performed in Eq. 3 for the nonlinear observation function $h(\cdot)$. Unfortunately, this produces linearization errors resulting from the loss of information related to higher-order moments. In strongly nonlinear systems this can result in poor state estimates and in the worst case cause divergence from the true state. We follow an ensemble-based filtering methodology [8] which avoids the Taylor series approximation and hence the associated linearization errors. 
Fundamentally, we approximate the state prediction with a Monte Carlo approximation where the sample mean of the ensemble models the mean $\boldsymbol{\mu}$ and the sample covariance the covariance $\boldsymbol{\Sigma}$. Thus, rather than calculating these values explicitly during state prediction at time $t$ as in Eq. 4, we instead start with an ensemble of $E$ members sampled from the prior distribution $\mathcal{N}(\boldsymbol{\mu}_{t-1|t-1}, \boldsymbol{\Sigma}_{t-1|t-1})$ such that $\boldsymbol{X}_{t-1|t-1} = [\boldsymbol{x}^1,\dots,\boldsymbol{x}^E]$. Each member is propagated forward in time using the state evolution model with an additional perturbation sampled from the process noise, $$ \begin{align*} \boldsymbol{x}^j_{t|t-1} &= \boldsymbol{G} \boldsymbol{x}^j_{t-1|t-1} + \mathcal{N} \left(0, \boldsymbol{Q}_t\right), \quad 1 \leq j \leq E. \end{align*} $$ As $E$ approaches infinity, the ensemble effectively models the full covariance calculated in Eq. 2 [8]. We note that in BIP the state transition function is linear, however, when this is not the case the nonlinear function $g(\cdot)$ is used directly. During the measurement update step, we calculate the innovation covariance $\boldsymbol{S}$ and the Kalman gain $\boldsymbol{K}$ directly from the ensemble, with no need to specifically maintain a covariance matrix. We begin by calculating the transformation of the ensemble to the measurement space, via the nonlinear observation function $h(\cdot)$, along with the deviation of each ensemble member from the sample mean: $$ \begin{align*} \boldsymbol{H}_t\boldsymbol{X}_{t|t-1} &= \left[h(\boldsymbol{x}^1_{t|t-1}), \dots, h(\boldsymbol{x}^E_{t|t-1})\right]^\intercal,\\ \boldsymbol{H}_t\boldsymbol{A}_t &= \boldsymbol{H}_t\boldsymbol{X}_{t|t-1} \\ &- \left[ \frac{1}{E} \sum_{j=1}^{E}h(\boldsymbol{x}^j_{t|t-1}), \dots, \frac{1}{E} \sum_{j=1}^{E}h(\boldsymbol{x}^j_{t|t-1}) \right]. 
\boldsymbol{X}_{t|t} &= \boldsymbol{X}_{t|t-1} + \boldsymbol{K}_t (\boldsymbol{\tilde{y}}_{t} - \boldsymbol{H}_t\boldsymbol{X}_{t|t-1}).
As we have yet to assimilate any observations of a new interaction, the (unknown) true distribution from which the demonstrations are sampled represents our best initial estimate of what it may be. However, given that these are real-world demonstrations, they are highly unlikely to be normally distributed. As such, two options are available in this case: we can either use the demonstrations directly as samples from the non-Gaussian prior distribution or approximate the true distribution with a Gaussian and sample from it. The latter approach is used by BIP in Eq. 5, however, this comes with its own risk as a poor initial estimate can lead to poor state estimates [11]. Given that the ensemble-based filtering proposed here provides a degree of robustness to non-Gaussian uncertainties, we choose to use samples from the non-Gaussian prior directly in the eBIP algorithm, with the knowledge that the ensemble will be pushed towards normality. In the event that the number of ensemble members $E$ is greater than the number of available demonstrations, then the density of the true interaction distribution will need to be estimated given the observed demonstrations. This can be accomplished using any density estimation technique, however, we employ a Gaussian mixture model here and denote the resulting algorithm as eBIP$^-$. ##### Computational Performance Many HRI applications require the use of multiple sensors, which increases the size of the latent space dimension and results in undesirable increases in computation time in the BIP algorithm. This is due to the necessary covariance matrix operations defined in Eq. 4, which causes BIP to yield an asymptotic computational complexity of approximately $O(n^{3})$ -- with the state of the art lower bounded at approximately $O(n^{2.4})$ -- where $n$ is the state dimension. However, as eBIP is ensemble-based, we no longer explicitly maintain a covariance matrix; this information is implicitly captured by the ensemble. 
As a result, the computational complexity for eBIP is approximately $O(E^2n)$, where $E$ is the ensemble size and $n$ is the state dimension [12]. Since the ensemble size is typically much smaller than the state dimension, this results in a performance increase when compared to BIP. Furthermore, the formulation presented in this work also obviates the need to explicitly construct the observation matrix $\boldsymbol{H}$. The creation of the observation matrix introduces an additional overhead for BIP as it must be initialized at each time step due to the phase-dependence, a process which is unnecessary in eBIP. Ensemble Bayesian Interaction Primitives also benefit from the computational performance-accuracy trade off inherent to all sample-based methods. Inference accuracy can be sacrificed for computational performance by lowering the number of ensemble members when called for. While this is also true for particle filters, they generally scale poorly to higher state dimensions due to sample degeneracy. In particle filtering, ensemble members are re-sampled according to their weight in a scheme known as importance sampling. However, in large state spaces it is likely that only a small number of ensemble members will have high weights, thus eventually causing all members to be re-sampled from only a few. In our proposed method this is not the case, as all members are treated as if they have equal weight, thus lending itself well to high-dimensional state spaces. ## References [1] Amor, H.B., Neumann, G., Kamthe, S., Kroemer, O. and Peters, J., 2014, May. Interaction primitives for human-robot cooperation tasks. In 2014 IEEE international conference on robotics and automation (ICRA) (pp. 2831-2837). IEEE. [2] Ijspeert, A.J., Nakanishi, J., Hoffmann, H., Pastor, P. and Schaal, S., 2013. Dynamical movement primitives: learning attractor models for motor behaviors. Neural computation, 25(2), pp.328-373. [3] Giszter, S.F., Mussa-Ivaldi, F.A. and Bizzi, E., 1993. 
Convergent force fields organized in the frog's spinal cord. Journal of neuroscience, 13(2), pp.467-491. [4] Paraschos, A., Daniel, C., Peters, J.R. and Neumann, G., 2013. Probabilistic movement primitives. In Advances in neural information processing systems (pp. 2616-2624). [5] Maeda, G., Ewerton, M., Lioutikov, R., Amor, H.B., Peters, J. and Neumann, G., 2014, November. Learning interaction for collaborative tasks with probabilistic movement primitives. In 2014 IEEE-RAS International Conference on Humanoid Robots (pp. 527-534). IEEE. [6] Campbell, J. and Amor, H.B., 2017, October. Bayesian interaction primitives: A slam approach to human-robot interaction. In Conference on Robot Learning (pp. 379-387). [7] Campbell, J., Stepputtis, S. and Amor, H.B., 2019. Probabilistic Multimodal Modeling for Human-Robot Interaction Tasks. In Robotics: Science and Systems 2019. [8] Evensen, G., 2003. The ensemble Kalman filter: Theoretical formulation and practical implementation. Ocean dynamics, 53(4), pp.343-367. [9] Burgers, G., Jan van Leeuwen, P. and Evensen, G., 1998. Analysis scheme in the ensemble Kalman filter. Monthly weather review, 126(6), pp.1719-1724. [10] Lawson, W.G. and Hansen, J.A., 2004. Implications of stochastic and deterministic filters as ensemble-based data assimilation methods in varying regimes of error growth. Monthly weather review, 132(8), pp.1966-1981. [11] Haseltine, E.L. and Rawlings, J.B., 2005. Critical evaluation of extended Kalman filtering and moving-horizon estimation. Industrial & engineering chemistry research, 44(8), pp.2451-2460. [12] Mandel, J., 2006. Efficient implementation of the ensemble Kalman filter. University of Colorado at Denver and Health Sciences Center, Center for Computational Mathematics.
github_jupyter
A list comprehension is a language construct that allows us to easily build a list by transforming, and optionally filtering, another iterable.
c(2,0) c(2,1) c(2,2)
We can also create comprehensions that use nested loops (not nested comprehensions, just nested loops).
The trick here is that we want to step through each vector at the same time (a simple nested loop would not work), so a Java-like approach might be:
But what if `number` was in our global scope: ``` number = 100 l = [number**2 for number in range(5)] number ``` As you can see, `number` in the comprehension was still local to the comprehension, and our global `number` was not affected. This is similar to global and nonlocal variables in functions. Because `number` is the loop item, it means that it gets *assigned* a value before being referenced, hence it is considered local - even if that symbol exists in a global or nonlocal scope. On the other hand, consider this example: ``` number = 100 l = [number * i for i in range(5)] print(l) ``` As you can see, the scope of the comprehension was able to reach out for `number` in the global scope. Same as functions. Now let's look at an example we've seen before when we studied closures. Suppose we want to generate a list of functions that will calculate powers of their argument, i.e. we want to define a bunch of functions * `fn_1(arg) --> arg ** 1` * `fn_2(arg) --> arg ** 2` * `fn_3(arg) --> arg ** 3` etc... We could certainly define a bunch of functions one by one: ``` fn_0 = lambda x: x**0 fn_1 = lambda x: x**1 fn_2 = lambda x: x**2 fn_3 = lambda x: x**3 # etc ``` But this would be very tedious if we had to do it more than just a few times. Instead, why don't we create those functions as lambdas and put them into a list where the index of the list will correspond to the power we are looking for. 
First let's make sure `i` is not in our global symbol table:
Right now we are running into a problem because the free variable `i` is being evaluated inside each function's body at **run time**. So, we can fix this by making each current value of `i` a parameter default of each lambda - this will get evaluated at the function's creation time - i.e. at each loop iteration:
github_jupyter
# Amazon SageMaker で PyTorch の GNN を使ったノード分類を行う このサンプルノートブックは、[PyTorch geometric のサンプルコード](https://pytorch-geometric.readthedocs.io/en/latest/notes/colabs.html)を参考にしました。 ## Node Classification with Graph Neural Networks [Previous: Introduction: Hands-on Graph Neural Networks](https://colab.research.google.com/drive/1h3-vJGRVloF5zStxL5I0rSy4ZUPNsjy8) This tutorial will teach you how to apply **Graph Neural Networks (GNNs) to the task of node classification**. Here, we are given the ground-truth labels of only a small subset of nodes, and want to infer the labels for all the remaining nodes (*transductive learning*). To demonstrate, we make use of the `Cora` dataset, which is a **citation network** where nodes represent documents. Each node is described by a 1433-dimensional bag-of-words feature vector. Two documents are connected if there exists a citation link between them. The task is to infer the category of each document (7 in total). This dataset was first introduced by [Yang et al. (2016)](https://arxiv.org/abs/1603.08861) as one of the datasets of the `Planetoid` benchmark suite. We again can make use [PyTorch Geometric](https://github.com/rusty1s/pytorch_geometric) for an easy access to this dataset via [`torch_geometric.datasets.Planetoid`](https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.html#torch_geometric.datasets.Planetoid): ## 準備 **このサンプルでは、カスタムコンテナを Amazon ECR に push する必要があります。**以下の操作でこのノートブックインスタンスで使用している IAM ロールに Amazon ECR にイメージを push するための権限を追加してください。 1. Amazon SageMaker コンソールからこのノートブックインスタンスの詳細画面を表示<br>(左側のメニューのインスタンス -> ノートブックインスタンス -> インスタンス名をクリック) 1. 「アクセス許可と暗号化」の「IAM ロール ARN」のリンクをクリック(IAM のコンソールに遷移します) 1. 「ポリシーをアタッチします」と書いてある青いボタンをクリック 1. 検索ボックスに ec2containerregistry と入力し AmazonEC2ContainerRegistryFullAccess のチェックボックスをチェックする 1. 
前処理用、学習用の Experiments を作成します。
このサンプルノートブックでは、データの前処理、前処理したデータを使ってモデルの学習、学習済みモデルを使ってバッチ推論、の順でおこないます。 これから 2種類のコンテナイメージを作成して Amazon ECR に push します。1つめのコンテナイメージはデータの前処理とバッチ推論で使用し、2つめのコンテナイメージはモデルの学習で使用します。 ## データの前処理 データの前処理は Amazon SageMaker Processing の仕組みを使って行います。まずは前処理用のコンテナイメージを作成します。 ``` ecr_repository = 'gnn-byo-proc' tag = ':latest' uri_suffix = 'amazonaws.com' processing_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository + tag) %%writefile docker/processing/Dockerfile FROM python:3.8-buster WORKDIR /opt/app RUN pip3 install torch==1.8.0 COPY requirements.txt /opt/app RUN pip3 install -r requirements.txt RUN pip3 install -U torch-sparse -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html RUN pip3 install jupyter COPY . /opt/app EXPOSE 8888 # jupyter notebook --allow-root --ip=* --no-browser -NotebookApp.token='' ``` 上記 Dockerfile を使ってコンテナイメージをビルドし、Amazon ECR に push します。 ``` # Create ECR repository and push docker image !docker build -t $ecr_repository docker/processing !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email) !aws ecr create-repository --repository-name $ecr_repository !docker tag {ecr_repository + tag} $processing_repository_uri !docker push $processing_repository_uri ``` 作成したイメージを使って ScriptProcessor を作成します。このとき、`instance_type` に `local` を設定するとローカルモードになり、ノートブックインスタンス上で Processing Job が実行されます。作成したコンテナイメージやスクリプトのデバッグをする際は、ローカルモードの利用がおすすめです。デバッグが完了したら、`instance_type` に インスタンスタイプを設定して Processing Job を実施します。 ``` from sagemaker.processing import ScriptProcessor script_processor = ScriptProcessor(command=['python3'], image_uri=processing_repository_uri, role=role, sagemaker_session=session, instance_count=1, # instance_type='local') instance_type='ml.c5.xlarge') ``` Processing Job で使用するスクリプトを作成します。前処理の内容を変更した場合は、前処理スクリプトを更新してから 2つしたのセル(script_processor.run)を再度実行すれば OK です。コンテナイメージの再作成は不要です。 ``` %%writefile preprocessing.py import sys sys.path.append('/opt/app') import boto3 from torch_geometric.transforms 
import NormalizeFeatures from torch_geometric.datasets import Planetoid import torch import shutil if __name__=='__main__': aws_session = boto3.Session(profile_name=None) dataset = Planetoid(root='data/Planetoid', name='Cora', transform=NormalizeFeatures()) print(f'Dataset: {dataset}:') print('======================') print(f'Number of graphs: {len(dataset)}') print(f'Number of features: {dataset.num_features}') print(f'Number of classes: {dataset.num_classes}') data = dataset[0] # Get the first graph object. print(data) # Gather some statistics about the graph. print(f'Number of nodes: {data.num_nodes}') print(f'Number of edges: {data.num_edges}') print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}') print(f'Number of training nodes: {data.train_mask.sum()}') print(f'Training node label rate: {int(data.train_mask.sum()) / data.num_nodes:.2f}') print(f'Contains isolated nodes: {data.contains_isolated_nodes()}') print(f'Contains self-loops: {data.contains_self_loops()}') print(f'Is undirected: {data.is_undirected()}') # save to container directory for uploading to S3 import os path = "./" files = os.listdir(path) print(files) src = 'data/Planetoid/Cora' dist = '/opt/ml/processing/output/Cora' print(os.path.getsize(src)) import tarfile # 圧縮 with tarfile.open('sample.tar.gz', 'w:gz') as t: t.add(src) files = os.listdir(path) print(files) shutil.copytree(src, dist) from torch_geometric.io import read_planetoid_data ``` 作成したスクリプトを使って `run` を実行して Processing Job を起動します。`run` の引数には以下を設定しています。 - code: 処理スクリプトのファイル名 - inputs: (入力データがある場合)入力データが保存されている Amazon S3 パスを `source` に、Processing 用インスタンスのどこに入力データをダウンロードするかを `destination` に設定します。今回はインターネット経由でデータをダウンロードするため使用しません。 - outputs: 出力データを保存する Processing 用インスタンスのパスを `source` で指定し、そこに処理済みのデータなどを保存しておくと、`destination` に設定した S3 パスにそれらのデータが自動的にアップロードされます。 - experiment_config: Processing Job を登録する Experiments があれば、その情報を指定します。 **以下をローカルモードで実行すると、最後に `PermissionError: [Errno 13] Permission denied: 'ind.cora.tx'` 
というエラーが出ますが、これはジョブがうまく動いていても出るので無視して構いません。インスタンスを使用した場合はこのエラーは出ません。** ``` from sagemaker.processing import ProcessingInput, ProcessingOutput from time import gmtime, strftime processing_job_name = "gnn-byo-process-{}".format(strftime("%d-%H-%M-%S", gmtime())) output_destination = 's3://{}/{}/data'.format(s3_output, s3_prefix) script_processor.run(code='preprocessing.py', job_name=processing_job_name, # inputs=[ProcessingInput( # source=raw_s3, # destination='/opt/ml/processing/input')], outputs=[ProcessingOutput(output_name='output', destination='{}/output'.format(output_destination), source='/opt/ml/processing/output')], experiment_config={ "ExperimentName": gnn_experiment_preprocess.experiment_name, "TrialComponentDisplayName": "Processing", } ) preprocessing_job_description = script_processor.jobs[-1].describe() ``` ## モデルの学習 ここまでで、データの前処理と、前処理済みデータの Amazon S3 へのアップロードが完了しました。次は、前処理済みのデータを使って GNN を学習します。 まずは学習用コンテナイメージを作成します。ベースイメージに、Amazon SageMaker が用意している PyTorch 1.8.0 のイメージを使用しました。 **この Dockerfile はノートブックインスタンスが `us-east-1 (バージニア北部)` の想定なので、他のリージョンをお使いの場合は FROM に書かれている Amazon ECR の URI の `us-east-1` の部分をお使いのリージョンに合わせて書き換えてください。** ``` %%writefile docker/train/Dockerfile # FROM python:3.8-buster FROM 763104351884.dkr.ecr.us-east-1.amazonaws.com/pytorch-training:1.8.0-cpu-py36-ubuntu18.04 WORKDIR /opt/app RUN pip3 install torch==1.8.0 COPY requirements.txt /opt/app RUN pip3 install -r requirements.txt RUN pip3 install -U torch-sparse -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html RUN pip3 install jupyter RUN pip3 install sagemaker-training WORKDIR / ecr_repository = 'gnn-byo-train' tag = ':latest' uri_suffix = 'amazonaws.com' train_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository + tag) ``` ベースイメージは Amazon SageMaker が用意している Amazon ECR リポジトリに保存されているため、そこへのアクセス権が必要です。以下のコマンドを実行します。 ``` !$(aws ecr get-login --region $region --registry-ids 763104351884 --no-include-email) ``` 
学習スクリプトを作成します。学習スクリプトの内容を変更した場合は、`pytorch_estimator.fit()` を再度実行すれば OK です。学習スクリプトをコンテナイメージの中に入れておらず、Estimator 経由でコンテナに渡すようにしているため、コンテナイメージの再作成は不要です。 ``` %%writefile train.py import torch from torch_geometric.nn import GCNConv import torch.nn.functional as F import json import argparse import os class GCN(torch.nn.Module): def __init__(self, hidden_channels, num_features, num_classes): super(GCN, self).__init__() torch.manual_seed(12345) self.conv1 = GCNConv(num_features, hidden_channels) self.conv2 = GCNConv(hidden_channels, num_classes) def forward(self, x, edge_index): x = self.conv1(x, edge_index) x = x.relu() x = F.dropout(x, p=0.5, training=self.training) x = self.conv2(x, edge_index) return x def train(): model.train() optimizer.zero_grad() # Clear gradients. out = model(data.x, data.edge_index) # Perform a single forward pass. loss = criterion(out[data.train_mask], data.y[data.train_mask]) # Compute the loss solely based on the training nodes. loss.backward() # Derive gradients. optimizer.step() # Update parameters based on gradients. return loss def test(): model.eval() out = model(data.x, data.edge_index) pred = out.argmax(dim=1) # Use the class with highest probability. test_correct = pred[data.test_mask] == data.y[data.test_mask] # Check against ground-truth labels. test_acc = int(test_correct.sum()) / int(data.test_mask.sum()) # Derive ratio of correct predictions. 
return test_acc def _save_checkpoint(model, optimizer, epoch, loss, args): # print("epoch: {} - loss: {}".format(epoch+1, loss)) checkpointing_path = args.checkpoint_path + '/checkpoint.pth' print("Saving the Checkpoint: {}".format(checkpointing_path)) torch.save({ 'epoch': epoch+1, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss, }, checkpointing_path) def _load_checkpoint(model, optimizer, args): print("--------------------------------------------") print("Checkpoint file found!") print("Loading Checkpoint From: {}".format(args.checkpoint_path + '/checkpoint.pth')) checkpoint = torch.load(args.checkpoint_path + '/checkpoint.pth') model.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) epoch_number = checkpoint['epoch'] loss = checkpoint['loss'] print("Checkpoint File Loaded - epoch_number: {} - loss: {}".format(epoch_number, loss)) print('Resuming training from epoch: {}'.format(epoch_number+1)) print("--------------------------------------------") return model, optimizer, epoch_number if __name__=='__main__': parser = argparse.ArgumentParser() # Data and model checkpoints directories parser.add_argument('--features-num', type=int, default=64, metavar='N', help='input feature size (default: 64)') parser.add_argument('--classes-num', type=int, default=1, metavar='N', help='input class size (default: 1)') parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=100, metavar='N', help='how many batches to wait before logging training status') parser.add_argument('--backend', type=str, default=None, help='backend for distributed training (tcp, 
gloo on cpu and gloo, nccl on gpu)') # Container environment parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS'])) parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST']) parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR']) parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN']) parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS']) parser.add_argument("--checkpoint-path",type=str,default="/opt/ml/checkpoints") args = parser.parse_args() model = GCN(hidden_channels=16, num_features=args.features_num, num_classes=args.classes_num) print(model) optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4) criterion = torch.nn.CrossEntropyLoss() path = args.data_dir files = os.listdir(path) print(files) from torch_geometric.io import read_planetoid_data data = read_planetoid_data(args.data_dir, 'Cora') # Check if checkpoints exists if not os.path.isfile(args.checkpoint_path + '/checkpoint.pth'): epoch_number = 0 else: model, optimizer, epoch_number = _load_checkpoint(model, optimizer, args) for epoch in range(epoch_number, int(args.epochs)+1): loss = train() acc = test() print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Acc: {acc:.4f}') if (epoch %100 == 0): _save_checkpoint(model, optimizer, epoch, loss, args) torch.save(model.state_dict(), args.model_dir+'/model.pth') # Create ECR repository and push docker image !docker build -t $ecr_repository docker/train !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email) !aws ecr create-repository --repository-name $ecr_repository !docker tag {ecr_repository + tag} $train_repository_uri !docker push $train_repository_uri ``` もし、上記コマンドでコンテナイメージをビルドする際に no space left というエラーが出ていたら、以下のコマンドのコメントアウトを解除して実行し、不要なファイルを削除してから再度コンテナイメージのビルドを実行してください。 ``` # !docker system prune -a -f ``` Estimator を作成して `fit` 
で学習ジョブを起動します。ハイパーパラメタの設定や取得したいメトリクスの情報を指定することができます。Processing Job と同様にローカルモードを使用することができます。`fit` の引数には、学習データが保存されている S3 のパスを指定します。PyTorch の Estimator については [こちらのドキュメント](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/sagemaker.pytorch.html#sagemaker.pytorch.estimator.PyTorch) をご参照ください。今回 PyTorch という名前の Estimator を使用しましたが、コンテナイメージの中に学習スクリプトを含めた状態で使用する場合は、Estimator という名前の Estimator を使用してください。 Estimator の `metric_definitions` に記録したいメトリクスの情報を指定することができます。`Regex` には、学習スクリプトが出力するログから取得したい数値を抽出するための正規表現を指定します。つまりメトリクスを記録したい場合は、学習スクリプトがメトリクスに関する情報をログに出力する必要があります。今回は Loss と Acc をメトリクスとして取得するよう設定しています。 Spot Instanceを用いて実行する場合は、下記のコードを Estimator の `instance_type`の次の行あたりに追加します。なお、`max_wait` は、`max_run` 以上の値である必要があります。 ```python max_run = 5000, use_spot_instances = 'True', max_wait = 10000, ``` チェックポイントの利用は必須ではありませんが、Spot Instance を使う場合は中断に備えてチェックポイントを有効にすることが推奨されています。チェックポイントの学習インスタンス上の保存パス(checkpoint_local_path)と、それらをアップロードする先のパス(checkpoint_s3_path)を設定し、学習スクリプトにチェックポイントを checkpoint_local_path に保存する記述を追加します。 保存したチェックポイントから学習を再開する場合は、新しく Estimator 定義して引数にチェックポイントが保存してある checkpoint_s3_path と チェックポイントをダウンロードしたいパス checkpoint_local_path を設定して fit を実行します。 チェックポイントの詳細については [こちらのドキュメント](https://docs.aws.amazon.com/sagemaker/latest/dg/model-checkpoints.html#model-checkpoints-enable) をご参照ください。 ``` from sagemaker.estimator import Estimator from sagemaker.pytorch.estimator import PyTorch import uuid import os # Spot training をする場合は、チェックポイントの設定を推奨 checkpoint_suffix = str(uuid.uuid4())[:8] checkpoint_s3_path = 's3://{}/checkpoint-{}'.format(bucket, checkpoint_suffix) checkpoint_local_path="/opt/ml/checkpoints" pytorch_estimator = PyTorch( entry_point='train.py', image_uri=train_repository_uri, role=role, instance_count=1, # instance_type='local', instance_type='ml.c4.2xlarge', max_run = 5000, use_spot_instances = 'True', max_wait = 10000, checkpoint_s3_uri=checkpoint_s3_path, checkpoint_local_path=checkpoint_local_path, output_path="s3://{}/output".format(bucket), 
sagemaker_session=session, hyperparameters = {'epochs': 200, 'features-num':1433, 'classes-num':7, 'lr':0.01}, enable_sagemaker_metrics=True, metric_definitions = [dict( Name = 'Loss', Regex = 'Loss: ([0-9.]+)' ), dict( Name = 'Acc', Regex = 'Acc: ([0-9.]+)' ) ] ) pytorch_estimator.fit({'train': os.path.join(output_destination, 'output/Cora/raw/')}, experiment_config={ "ExperimentName": gnn_experiment_train.experiment_name, "TrialComponentDisplayName": "Training", }) ``` ## Amazon SageMaker Experiments でモデルを比較 SageMaker Experiments を使うと複数のモデルのメトリクスなどを比較することができます。上のセルの Estimator の引数で epochs や lr などのハイパーパラメタを変えて何度か学習を実行してから次のセル以降を実行してみましょう。Experiments 内の Trial のフィルタやソートなど方法については [ExperimentAnalytics のドキュメント](https://sagemaker.readthedocs.io/en/stable/api/training/analytics.html#sagemaker.analytics.ExperimentAnalytics) をご参照ください。 メトリクスに関して、DataFrame の列名は Loss - Min などと書かれていますが、ExperimentAnalytics の sort_by で Loss - Min を指定する場合は、metrics.loss.min となります。 ``` search_expression = { "Filters":[ { "Name": "DisplayName", "Operator": "Equals", "Value": "Training", } ], } trial_component_analytics = ExperimentAnalytics( sagemaker_session=session, experiment_name=gnn_experiment_train.experiment_name, search_expression=search_expression, sort_by="metrics.acc.max", sort_order="Ascending",# Ascending or Descending metric_names=['Loss', 'Acc'], parameter_names=['epochs', 'lr'], input_artifact_names=[] ) import pandas as pd df = trial_component_analytics.dataframe() pd.set_option('display.max_columns', None) df print(df.columns.tolist()) ``` ## Processing Job を使ったバッチ推論 学習したモデルを使ってバッチ推論を行います。今回は、前処理で使用したコンテナイメージを流用してバッチ推論用 Processing Job を起動します。 まずは推論用スクリプトを作成します。<br> 推論結果をグラフにプロットし、その画像を Amazon S3 にアップロードするようにしました。 ``` %%writefile inference.py import torch from torch_geometric.nn import GCNConv import torch.nn.functional as F import json import argparse import os import tarfile import matplotlib.pyplot as plt class GCN(torch.nn.Module): def __init__(self, hidden_channels, 
num_features, num_classes): super(GCN, self).__init__() torch.manual_seed(12345) self.conv1 = GCNConv(num_features, hidden_channels) self.conv2 = GCNConv(hidden_channels, num_classes) def forward(self, x, edge_index): x = self.conv1(x, edge_index) x = x.relu() x = F.dropout(x, p=0.5, training=self.training) x = self.conv2(x, edge_index) return x def test(): model.eval() out = model(data.x, data.edge_index) pred = out.argmax(dim=1) # Use the class with highest probability. test_correct = pred[data.test_mask] == data.y[data.test_mask] # Check against ground-truth labels. test_acc = int(test_correct.sum()) / int(data.test_mask.sum()) # Derive ratio of correct predictions. return test_acc from sklearn.manifold import TSNE def visualize(h, color, path): z = TSNE(n_components=2).fit_transform(out.detach().cpu().numpy()) fig = plt.figure(figsize=(10,10)) plt.xticks([]) plt.yticks([]) plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2") # plt.show() fig.savefig(os.path.join(path, "img.png")) if __name__=='__main__': parser = argparse.ArgumentParser() # Data and model checkpoints directories parser.add_argument('--features-num', type=str, default='1', metavar='N', help='input feature size (default: 1)') parser.add_argument('--classes-num', type=str, default='1', metavar='N', help='input class size (default: 1)') parser.add_argument('--model-dir', type=str, default='/opt/ml/model', metavar='N', help='model data path (default: /opt/ml/model)') parser.add_argument('--input-dir', type=str, default='/opt/ml/input', metavar='N', help='input data path (default: /opt/ml/input)') parser.add_argument('--output-dir', type=str, default='/opt/ml/output', metavar='N', help='output data path (default: /opt/ml/output)') args = parser.parse_args() from torch_geometric.io import read_planetoid_data data = read_planetoid_data(args.input_dir, 'Cora') with tarfile.open(os.path.join(args.model_dir, 'model.tar.gz'), 'r:gz') as t: t.extractall() model = GCN(hidden_channels=16, 
num_features=int(args.features_num), num_classes=int(args.classes_num)) model.load_state_dict(torch.load('model.pth')) # print(model) test_acc = test() print(f'Test Accuracy: {test_acc:.4f}') model.eval() out = model(data.x, data.edge_index) visualize(out, color=data.y, path=args.output_dir) from sagemaker.processing import ScriptProcessor batch_inference_processor = ScriptProcessor(command=['python3'], image_uri=processing_repository_uri, role=role, instance_count=1, # instance_type='local') instance_type='ml.c5.xlarge') from sagemaker.processing import ProcessingInput, ProcessingOutput from time import gmtime, strftime processing_job_name = "gnn-byo-batch-inference-{}".format(strftime("%d-%H-%M-%S", gmtime())) output_destination_inference = 's3://{}/{}/batch-inference'.format(s3_output, s3_prefix) input_dir = '/opt/ml/processing/input' model_dir = '/opt/ml/processing/model' output_dir = '/opt/ml/processing/output' model_s3 = pytorch_estimator.model_data raw_s3 = os.path.join(output_destination, 'output/Cora/raw/') batch_inference_processor.run(code='inference.py', job_name=processing_job_name, inputs=[ProcessingInput( source=model_s3, destination=model_dir), ProcessingInput( source=raw_s3, destination=input_dir)], outputs=[ProcessingOutput(output_name='output', destination='{}/output'.format(output_destination_inference), source=output_dir)], arguments=['--model-dir', model_dir, '--input-dir', input_dir, '--output-dir', output_dir , '--features-num', '1433', '--classes-num', '7'] # experiment_config={ # "ExperimentName": gnn_experiment.experiment_name, # "TrialComponentDisplayName": "Processing", # } ) preprocessing_job_description = batch_inference_processor.jobs[-1].describe() ``` バッチ推論で出力したプロットの画像をダウンロードして表示します。 ``` !aws s3 cp $output_destination_inference/output/img.png ./ from IPython.display import Image Image("./img.png") ``` ## リソースの削除 利用が終わったら、このノートブックを実行したノートブックインスタンスの停止および削除を実施してください。ノートブックインスタンスを停止させると、ノートブックインスタンスの課金は止まりますがアタッチされている EBS 
ボリュームへの課金が継続しますので、完全に課金を止めるにはノートブックインスタンスの停止だけでなく削除まで実施してください。 また、Amazon S3 にアップロードした各種ファイルに対しても課金が発生するため、不要であれば削除してください。 ``` sm = boto3.Session().client('sagemaker') def cleanup(experiment): for trial_summary in experiment.list_trials(): trial = Trial.load(sagemaker_boto_client=sm, trial_name=trial_summary.trial_name) for trial_component_summary in trial.list_trial_components(): tc = TrialComponent.load( sagemaker_boto_client=sm, trial_component_name=trial_component_summary.trial_component_name) trial.remove_trial_component(tc) try: # comment out to keep trial components tc.delete() except: # tc is associated with another trial continue # to prevent throttling time.sleep(.5) trial.delete() experiment.delete() cleanup(gnn_experiment_preprocess) cleanup(gnn_experiment_train) ```
github_jupyter
--- _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._ --- # Assignment 1 - Creating and Manipulating Graphs Eight employees at a small company were asked to choose 3 movies that they would most enjoy watching for the upcoming company movie night. These choices are stored in the file `Employee_Movie_Choices.txt`. A second file, `Employee_Relationships.txt`, has data on the relationships between different coworkers. The relationship score has value of `-100` (Enemies) to `+100` (Best Friends). A value of zero means the two employees haven't interacted or are indifferent. Both files are tab delimited. ``` import networkx as nx import pandas as pd import numpy as np from networkx.algorithms import bipartite # This is the set of employees employees = set(['Pablo', 'Lee', 'Georgia', 'Vincent', 'Andy', 'Frida', 'Joan', 'Claude']) # This is the set of movies movies = set(['The Shawshank Redemption', 'Forrest Gump', 'The Matrix', 'Anaconda', 'The Social Network', 'The Godfather', 'Monty Python and the Holy Grail', 'Snakes on a Plane', 'Kung Fu Panda', 'The Dark Knight', 'Mean Girls']) # you can use the following function to plot graphs # make sure to comment it out before submitting to the autograder def plot_graph(G, weight_name=None): ''' G: a networkx G weight_name: name of the attribute for plotting edge weights (if G is weighted) ''' %matplotlib notebook import matplotlib.pyplot as plt plt.figure() pos = nx.spring_layout(G) edges = G.edges() weights = None if weight_name: weights = [int(G[u][v][weight_name]) for u,v in edges] labels = nx.get_edge_attributes(G,weight_name) nx.draw_networkx_edge_labels(G,pos,edge_labels=labels) nx.draw_networkx(G, pos, edges=edges, width=weights); else: nx.draw_networkx(G, pos, edges=edges); 
``` ### Question 1 Using NetworkX, load in the G1.edges() graph from `Employee_Movie_Choices.txt` and return that graph. *This function should return a networkx graph with 19 nodes and 24 edges* ``` def answer_one(): df1 = pd.read_csv('Employee_Movie_Choices.txt', delimiter="\t") G1 = nx.from_pandas_dataframe(df1, source='#Employee', target='Movie') # adjust the node positions in the graph # l, r = nx.bipartite.sets(G1) # pos = {} # pos.update( (n, (1, i)) for i, n in enumerate(l) ) # put nodes from l at x=1 # pos.update( (n, (2, i)) for i, n in enumerate(r) ) # put nodes from r at x=2 return G1 answer_one() ``` ### Question 2 Using the graph from the previous question, add nodes attributes named `'type'` where movies have the value `'movie'` and employees have the value `'employee'` and return that graph. *This function should return a networkx graph with node attributes `{'type': 'movie'}` or `{'type': 'employee'}`* ``` def answer_two(): G1 = answer_one() l, r = nx.bipartite.sets(G1) dict_type={} dict_type.update((n, 'employee') for i, n in enumerate(l)) dict_type.update((n, 'movie') for i, n in enumerate(r)) nx.set_node_attributes(G1,name='type', values=dict_type) return G1 answer_two() ``` ### Question 3 Find a weighted projection of the graph from `answer_two` which tells us how many movies different pairs of employees have in common. *This function should return a weighted projected graph.* ``` def answer_three(): G1 = answer_two() l, r = nx.bipartite.sets(G1) G2 = bipartite.weighted_projected_graph(G1,l) return G2 ``` ### Question 4 Suppose you'd like to find out if people that have a high relationship score also like the same types of movies. Find the Pearson correlation ( using `DataFrame.corr()` ) between employee relationship scores and the number of movies they have in common. If two employees have no movies in common it should be treated as a 0, not a missing value, and should be included in the correlation calculation. 
*This function should return a float.* ``` G1 = answer_two() l, r = nx.bipartite.sets(G1) G2 = bipartite.weighted_projected_graph(G1,l) print(G2.edges()) def answer_four(): # Your Code Here return # Your Answer Here ```
github_jupyter
# ML algorithms: Logistic Regression Source: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression From the sklearn handbook: >Logistic regression, despite its name, is a linear model for classification rather than regression. Logistic regression is also known in the literature as logit regression, maximum-entropy classification (MaxEnt) or the log-linear classifier. In this model, the probabilities describing the possible outcomes of a single trial are modeled using a logistic function. Code: http://marcharper.codes/2016-06-27/Logistic+Regression.html Slides: https://s3.amazonaws.com/assets.datacamp.com/production/course_15356/slides/chapter4.pdf ``` %matplotlib inline %load_ext autoreload %autoreload 2 import matplotlib.pyplot as plt import pandas as pd from scipy import stats import seaborn as sns distA = stats.norm(30, 5) distB = stats.norm(15, 4) data = [] for i in range(100): data.append((distA.rvs(), "A")) data.append((distB.rvs(), "B")) df = pd.DataFrame(data, columns=["measurement", "class"]) df.head() sns.violinplot(x="class", y="measurement", data=df); sns.distplot(df[df["class"] == "A"]["measurement"]) sns.distplot(df[df["class"] == "B"]["measurement"]); # convert categorical values to numbers df["class_num"] = df['class'].apply(lambda x: 1 if x == 'A' else 0 ) df.head() plt.scatter(df["measurement"], df["class_num"]) plt.show() ``` We could try to use a linear regression to separate the classes. With the best fit line we could label points above and below the line in seperate classes. This works ok (better than no classifier) but has a lot of drawbacks, and logistic regression typically gives a better solution. 
``` from sklearn import linear_model X = df[["measurement"]] y = df["class_num"] model = linear_model.LinearRegression() model.fit(X, y) plt.scatter(df["measurement"], df["class_num"]) plt.plot(df["measurement"], model.predict(X), color="r") plt.show() ``` A logistic regression produces a classifier that separates the two classes much more sharply. ``` from sklearn import linear_model df.sort_values(by="measurement", inplace=True) X = df[["measurement"]] y = df["class_num"] model = linear_model.LogisticRegression() model.fit(X, y) plt.scatter(df["measurement"], df["class_num"]) plt.plot(df["measurement"], model.predict(X), color="r") plt.xlabel("Measurement") plt.show(); ``` We can also plot the predicted probabilities and check the accuracy of the model. ``` from sklearn import linear_model df.sort_values(by="measurement", inplace=True) X = df[["measurement"]] y = df["class_num"] model = linear_model.LogisticRegression() model.fit(X, y) plt.scatter(df["measurement"], df["class_num"]) plt.plot(df["measurement"], model.predict_proba(X)[:, 1], color="r") plt.xlabel("Measurement") plt.ylabel("Probability of being in class B") plt.show() print("Accuracy", model.score(X, y)) ``` Now let's try a set of data that is not so well separated. 
``` distA = stats.norm(22, 5) distB = stats.norm(15, 3) data = [] for i in range(100): data.append((distA.rvs(), "A")) data.append((distB.rvs(), "B")) df = pd.DataFrame(data, columns=["measurement", "class"]) df["class_num"] = df['class'].apply(lambda x: 1 if x == 'A' else 0 ) sns.distplot(df[df["class"] == "A"]["measurement"]) sns.distplot(df[df["class"] == "B"]["measurement"]); from sklearn import linear_model df.sort_values(by="measurement", inplace=True) X = df[["measurement"]] y = df["class_num"] model = linear_model.LogisticRegression() model.fit(X, y) plt.scatter(df["measurement"], df["class_num"]) plt.plot(df["measurement"], model.predict_proba(X)[:, 1], color="r") plt.show() print("Accuracy", model.score(X, y)) ``` # A more complex real-world example/ analysis Source: https://github.com/carljv/Will_it_Python/tree/master/ARM/ch5 >Logistic models of well switching in Bangladesh >Our data are information on about 3,000 respondent households in Bangladesh with wells having an unsafe amount of arsenic. The data record the amount of arsenic in the respondent's well, the distance to the nearest safe well (in meters), whether that respondent "switched" wells by using a neighbor's safe well instead of their own, as well as the respondent's years of education and a dummy variable indicating whether they belong to a community association. >Our goal is to model well-switching decision. Since it's a binary variable (1 = switch, 0 = no switch), we'll use logistic regression. >This analysis follows Gelman and Hill Data Analysis Using Regression and Multilevel/Hierarchical Models, chapter 5.4. 
``` import numpy as np from pandas import * from statsmodels.formula.api import logit import statsmodels.api as sm import matplotlib.pyplot as plt from patsy import dmatrix, dmatrices df = read_csv('/data/ifu/summerschool/wells.dat', sep = ' ', header = 0, index_col = 0) df.head() ``` ### Model 1: Distance to a safe well For our first pass, we'll just use the distance to the nearest safe well. Since the distance is recorded in meters, and the effect of one meter is likely to be very small, we can get nicer model coefficients if we scale it. Instead of creating a new scaled variable, we'll just do it in the formula description using the I() function. ``` model1 = logit('switch ~ I(dist/100.)', data = df).fit() model1.summary() def binary_jitter(x, jitter_amount = .05): ''' Add jitter to a 0/1 vector of data for plotting. ''' jitters = np.random.rand(*x.shape) * jitter_amount x_jittered = x + np.where(x == 1, -1, 1) * jitters return x_jittered dist_logit_par = model1.params['I(dist / 100.)'] plt.plot(df['dist'], binary_jitter(df['switch'], .1), '.', alpha = .1) plt.plot(np.sort(df['dist']), model1.predict()[np.argsort(df['dist'])], lw = 2) plt.ylabel('Switched Wells') plt.xlabel('Distance from safe well (meters)'); ``` Another way to look at this is to plot the densities of distance for switchers and non-switchers. We expect the distribution of switchers to have more mass over short distances and the distribution of non-switchers to have more mass over long distances. ``` kde_sw = kde = sm.nonparametric.KDEUnivariate(df['dist'][df['switch'] == 1]) kde_nosw = sm.nonparametric.KDEUnivariate(df['dist'][df['switch'] == 0]) kde_sw.fit() kde_nosw.fit() plt.plot(kde_sw.support, kde_sw.density, label = 'Switch') plt.plot(kde_nosw.support, kde_nosw.density, color = 'red', label = 'No Switch') plt.xlabel('Distance (meters)') plt.legend(loc = 'best'); ``` #### Model 2: Distance to a safe well and the arsenic level of own well Next, let's add the arsenic level as a regressor. 
We'd expect respondents with higher arsenic levels to be more motivated to switch. ``` model2 = logit('switch ~ I(dist / 100.) + arsenic', data = df).fit() model2.summary() ``` Which is what we see. The coefficients are what we'd expect: the farther to a safe well, the less likely a respondent is to switch, but the higher the arsenic level in their own well, the more likely. ### Marginal effects To see the effect of these on the probability of switching, let's calculate the marginal effects at the mean of the data. ``` argeff = model2.get_margeff(at = 'mean') print(argeff.summary()) ``` So, for the mean respondent, an increase of 100 meters to the nearest safe well is associated with a 22% lower probability of switching. But an increase of 1 in the arsenic level is associated with an 11% higher probability of switching. #### Class separability To get a sense of how well this model might classify switchers and non-switchers, we can plot each class of respondent in (distance-arsenic)-space. We don't see very clean separation, so we'd expect the model to have a fairly high error rate. But we do notice that the short-distance/high-arsenic region of the graph is mostly comprised switchers, and the long-distance/low-arsenic region is mostly comprised of non-switchers. 
``` logit_pars = model2.params intercept = -logit_pars[0] / logit_pars[2] slope = -logit_pars[1] / logit_pars[2] dist_sw = df['dist'][df['switch'] == 1] dist_nosw = df['dist'][df['switch'] == 0] arsenic_sw = df['arsenic'][df['switch'] == 1] arsenic_nosw = df['arsenic'][df['switch'] == 0] plt.figure(figsize = (12, 8)) plt.plot(dist_sw, arsenic_sw, '.', mec = 'purple', mfc = 'None', label = 'Switch') plt.plot(dist_nosw, arsenic_nosw, '.', mec = 'orange', mfc = 'None', label = 'No switch') plt.plot(np.arange(0, 350, 1), intercept + slope * np.arange(0, 350, 1) / 100., '-k', label = 'Separating line') plt.ylim(0, 10) plt.xlabel('Distance to safe well (meters)') plt.ylabel('Arsenic level') plt.legend(loc = 'best'); ``` ### Model 3: Adding an interation It's sensible that distance and arsenic would interact in the model. In other words, the effect of an 100 meters on your decision to switch would be affected by how much arsenic is in your well. Again, we don't have to pre-compute an explicit interaction variable. We can just specify an interaction in the formula description using the : operator. ``` model3 = logit('switch ~ I(dist / 100.) + arsenic + I(dist / 100.):arsenic', data = df).fit() model3.summary() ``` The coefficient on the interaction is negative and significant. While we can't directly intepret its quantitative effect on switching, the qualitative interpretation gels with our intuition. Distance has a negative effect on switching, but this negative effect is reduced when arsenic levels are high. Alternatively, the arsenic level have a positive effect on switching, but this positive effect is reduced as distance to the nearest safe well increases. ### Model 4: Adding educuation, more interactions and centering variables Respondents with more eduction might have a better understanding of the harmful effects of arsenic and therefore may be more likely to switch. Education is in years, so we'll scale it for more sensible coefficients. 
We'll also include interactions amongst all the regressors. We're also going to center the variables, to help with interpretation of the coefficients. Once more, we can just do this in the formula, without pre-computing centered variables. ``` model_form = ('switch ~ center(I(dist / 100.)) + center(arsenic) + ' + 'center(I(educ / 4.)) + ' + 'center(I(dist / 100.)) : center(arsenic) + ' + 'center(I(dist / 100.)) : center(I(educ / 4.)) + ' + 'center(arsenic) : center(I(educ / 4.))' ) model4 = logit(model_form, data = df).fit() model4.summary() ``` #### Model assessment: Binned Residual plots Plotting residuals to regressors can alert us to issues like nonlinearity or heteroskedasticity. Plotting raw residuals in a binary model isn't usually informative, so we do some smoothing. Here, we'll averaging the residuals within bins of the regressor. (A lowess or moving average might also work.) ``` model4.resid_response def bin_residuals(resid, var, bins): ''' Compute average residuals within bins of a variable. Returns a dataframe indexed by the bins, with the bin midpoint, the residual average within the bin, and the confidence interval bounds. ''' resid_df = DataFrame({'var': var, 'resid': resid}) resid_df['bins'] = qcut(var, bins) bin_group = resid_df.groupby('bins') bin_df = bin_group['var', 'resid'].mean() bin_df['count'] = bin_group['resid'].count() bin_df['lower_ci'] = -2 * (bin_group['resid'].std() / np.sqrt(bin_group['resid'].count())) bin_df['upper_ci'] = 2 * (bin_group['resid'].std() / np.sqrt(bin_df['count'])) bin_df = bin_df.sort_values('var') return(bin_df) def plot_binned_residuals(bin_df): ''' Plotted binned residual averages and confidence intervals. 
''' plt.plot(bin_df['var'], bin_df['resid'], '.') plt.plot(bin_df['var'], bin_df['lower_ci'], '-r') plt.plot(bin_df['var'], bin_df['upper_ci'], '-r') plt.axhline(0, color = 'gray', lw = .5) arsenic_resids = bin_residuals(model4.resid_response, df['arsenic'], 40) dist_resids = bin_residuals(model4.resid_response, df['dist'], 40) plt.figure(figsize = (12, 5)) plt.subplot(121) plt.ylabel('Residual (bin avg.)') plt.xlabel('Arsenic (bin avg.)') plot_binned_residuals(arsenic_resids) plt.subplot(122) plot_binned_residuals(dist_resids) plt.ylabel('Residual (bin avg.)') plt.xlabel('Distance (bin avg.)'); ``` #### Model 5: log-scaling arsenic The binned residual plot indicates some nonlinearity in the arsenic variable. Note how the model over-estimated for low arsenic and underestimates for high arsenic. This suggests a log transformation or something similar. We can again do this transformation right in the formula. ``` model_form = ('switch ~ center(I(dist / 100.)) + center(np.log(arsenic)) + ' + 'center(I(educ / 4.)) + ' + 'center(I(dist / 100.)) : center(np.log(arsenic)) + ' + 'center(I(dist / 100.)) : center(I(educ / 4.)) + ' + 'center(np.log(arsenic)) : center(I(educ / 4.))' ) model5 = logit(model_form, data = df).fit() model5.summary() ``` And the binned residual plot for arsenic now looks better. ``` arsenic_resids = bin_residuals(model5.resid_response, df['arsenic'], 40) dist_resids = bin_residuals(model5.resid_response, df['dist'], 40) plt.figure(figsize = (12, 5)) plt.subplot(121) plot_binned_residuals(arsenic_resids) plt.ylabel('Residual (bin avg.)') plt.xlabel('Arsenic (bin avg.)') plt.subplot(122) plot_binned_residuals(dist_resids) plt.ylabel('Residual (bin avg.)') plt.xlabel('Distance (bin avg.)'); ``` #### Model error rates The pred_table() gives us a confusion matrix for the model. We can use this to compute the error rate of the model. 
We should compare this to the null error rate, which comes from a model
github_jupyter
# Full pipeline (quick) This notebook explains the full pipeline in a detailed manner, including the preprocessing steps, the summerization steps and the classification ones. ## Loading the dataset under the Pandas Dataframe format Because Melusine operates Pandas Dataframes by applying functions to certain columns to produce new columns, the initial columns have to follow a strict naming. The basic requirement to use Melusine is to have an input e-mail DataFrame with the following columns : - body : Body of an email (single message or conversation historic) - header : Header of an email - date : Reception date of an email - from : Email address of the sender - to (optional): Email address of the recipient - attachment (optional) : List of filenames attached to the email - label (optional): Label of the email for a classification task (examples: Business, Spam, Finance or Family) Each row correspond to a unique email. ``` from melusine.data.data_loader import load_email_data import ast df_emails = load_email_data() df_emails['attachment'] = df_emails['attachment'].apply(ast.literal_eval) df_emails.columns print('Body :') print(df_emails.body[1]) print('\n') print('Header :') print(df_emails.header[1]) print('Date :') print(df_emails.date[1]) print('From :') print(df_emails.loc[1,"from"]) print('To :') print(df_emails.to[1]) print('Attachment :') print(df_emails.attachment[1]) print('Label :') print(df_emails.label[1]) ``` ## Text preprocessing pipeline This pipeline will : - Update the columns of the dataframe if an email is transfered. - Segment the different messages of an email and tag its parts (hello, body, greetings, footer..). - Extract the body of the last message of the email. - Clean the body of the last message of the email. - Apply the phraser on the cleaned body. - Tokenize the cleaned body (once the phraser has been applied). 
The pipeline will return new columns at each step, the most important being
Segmenting = TransformerScheduler( functions_scheduler=[ (build_historic, None, ['structured_historic']), (structure_email, None, ['structured_body']) ] ) # Transformer object to extract the body of the last message of the email and clean it as # well as the header LastBodyHeaderCleaning = TransformerScheduler( functions_scheduler=[ (extract_last_body, None, ['last_body']), (clean_body, None, ['clean_body']), (clean_header, None, ['clean_header']) ] ) # Transformer object to apply the phraser on the texts phraser = Phraser().load('./data/phraser.pickle') PhraserTransformer = TransformerScheduler( functions_scheduler=[ (phraser_on_body, (phraser,), ['clean_body']), (phraser_on_header, (phraser,), ['clean_header']) ] ) # Tokenizer object tokenizer = Tokenizer(input_column="clean_body") # Full preprocessing pipeline PreprocessingPipeline = Pipeline([ ('ManageTransferReply', ManageTransferReply), ('Segmenting', Segmenting), ('LastBodyHeaderCleaning', LastBodyHeaderCleaning), ('PhraserTransformer', PhraserTransformer), ('tokenizer', tokenizer) ]) df_emails = PreprocessingPipeline.fit_transform(df_emails) df_emails.columns ``` ## Metadata preprocessing pipeline The metadata have to be extracted before being dummified. This pipeline extractes the following metadata : - **extension :** from the "from" column. - **dayofweek :** from the date. - **hour :** from the date. - **min :** from the date. - **attachment_type :** from the attachment column. 
``` from sklearn.pipeline import Pipeline from melusine.prepare_email.metadata_engineering import MetaExtension from melusine.prepare_email.metadata_engineering import MetaDate from melusine.prepare_email.metadata_engineering import MetaAttachmentType from melusine.prepare_email.metadata_engineering import Dummifier # Pipeline to extract dummified metadata MetadataPipeline = Pipeline([ ('MetaExtension', MetaExtension()), ('MetaDate', MetaDate()), ('MetaAttachmentType',MetaAttachmentType()), ('Dummifier', Dummifier()) ]) df_meta = MetadataPipeline.fit_transform(df_emails) df_meta.columns df_meta.head() ``` ## Keywords extraction Once a tokens column exists, keywords can be extracted by using the KeywordsGenerator class : ``` from melusine.summarizer.keywords_generator import KeywordsGenerator keywords_generator = KeywordsGenerator(n_max_keywords=4) df_emails = keywords_generator.fit_transform(df_emails) print(df_emails.body[23]) df_emails.clean_body[23] df_emails.tokens[23] df_emails.keywords[23] ``` ## Classification with neural networks Melusine offers a NeuralModel class to train, save, load and use for prediction any kind of neural networks based on Keras. Predefined architectures of RNN and CNN models using the cleaned body and the metadata of the emails are also offered. #### Embeddings training Embeddings have to be pretrained on the data set to be given as arguments of the neural networks. 
``` from melusine.nlp_tools.embedding import Embedding pretrained_embedding = Embedding(input_column='clean_body', workers=1, min_count=5) pretrained_embedding.train(df_emails) ``` #### X and y preparation ``` import pandas as pd from sklearn.preprocessing import LabelEncoder X = pd.concat([df_emails['clean_body'],df_meta],axis=1) y = df_emails['label'] le = LabelEncoder() y = le.fit_transform(y) X.columns X.head() y ``` #### Training and predictions with a CNN ``` from melusine.models.neural_architectures import cnn_model from melusine.models.train import NeuralModel nn_model = NeuralModel(architecture_function=cnn_model, pretrained_embedding=pretrained_embedding, text_input_column="clean_body", meta_input_list=['extension', 'dayofweek','hour', 'min', 'attachment_type'], n_epochs=10) nn_model.fit(X,y) y_res = nn_model.predict(X) y_res = le.inverse_transform(y_res) y_res ```
github_jupyter
``` # export from nbdev.imports import * from nbdev.sync import * from nbdev.export import * from nbdev.showdoc import * from nbdev.template import * from html.parser import HTMLParser from nbconvert.preprocessors import ExecutePreprocessor, Preprocessor from nbconvert import HTMLExporter,MarkdownExporter import traitlets # default_exp export2html # default_cls_lvl 3 ``` # Convert to html > The functions that transform the dev notebooks in the documentation of the library - toc: true The most important function defined in this module is `notebook2html`, so you may want to jump to it before scrolling though the rest, which explain the details behind the scenes of the conversion from notebooks to the html documentation. The main things to remember are: - put a `#hide` flag at the top of any cell you want to completely hide in the docs - use the hide input [jupyter extension](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) to hide the input of some cells (by default all `show_doc` cells have that marker added) - you can define some jekyll metadata in the markdown cell with the title, see `get_metadata` - use backsticks for terms you want automatic links to be found, but use `<code>` and `</code>` when you have homonyms and don't want those links - you can define the default toc level of classes with `# default_cls_lvl` flag followed by a number (default is 2) - you can add jekyll warnings, important or note banners with appropriate block quotes (see `add_jekyll_notes`) - put any images you want to use in the images folder of your notebook folder, they will be automatically copied over to the docs folder ## Preprocessing notebook ### Cell processors ``` #export class HTMLParseAttrs(HTMLParser): "Simple HTML parser which stores any attributes in `attrs` dict" def handle_starttag(self, tag, attrs): self.tag,self.attrs = tag,dict(attrs) def attrs2str(self): "Attrs as string" return ' '.join([f'{k}="{v}"' for k,v in self.attrs.items()]) def show(self): 
"Tag with updated attrs" return f'<{self.tag} {self.attrs2str()} />' def __call__(self, s): "Parse `s` and store attrs" self.feed(s) return self.attrs h = HTMLParseAttrs() t = h('<img src="src" alt="alt" width="700" caption="cap" />') test_eq(t['width'], '700') test_eq(t['src' ], 'src') t['width'] = '600' test_eq(h.show(), '<img src="src" alt="alt" width="600" caption="cap" />') t['max-width'] = t.pop('width') test_eq(h.show(), '<img src="src" alt="alt" caption="cap" max-width="600" />') ``` The following functions are applied on individual cells as a preprocessing step before the conversion to html. ``` #export def remove_widget_state(cell): "Remove widgets in the output of `cells`" if cell['cell_type'] == 'code' and 'outputs' in cell: cell['outputs'] = [l for l in cell['outputs'] if not ('data' in l and 'application/vnd.jupyter.widget-view+json' in l.data)] return cell ``` Those outputs usually can't be rendered properly in html. ``` #export # Matches any cell that has a `show_doc` or an `#export` in it _re_cell_to_hide = r's*show_doc\(|^\s*#\s*export\s+|^\s*#\s*hide_input\s+' #export def hide_cells(cell): "Hide inputs of `cell` that need to be hidden" if check_re(cell, _re_cell_to_hide): cell['metadata'] = {'hide_input': True} return cell ``` This concerns all the cells with a `# export` flag and all the cell containing a `show_doc` for a function or class. 
``` for source in ['show_doc(read_nb)', '# export\nfrom local.core import *', '# hide_input\n2+2']: cell = {'cell_type': 'code', 'source': source} cell1 = hide_cells(cell.copy()) assert 'metadata' in cell1 assert 'hide_input' in cell1['metadata'] assert cell1['metadata']['hide_input'] cell = {'cell_type': 'code', 'source': '# exports\nfrom local.core import *'} test_eq(hide_cells(cell.copy()), cell) #export # Matches any line containing an #exports _re_exports = re.compile(r'^#\s*exports[^\n]*\n') #export def clean_exports(cell): "Remove exports flag from `cell`" cell['source'] = _re_exports.sub('', cell['source']) return cell ``` The rest of the cell is displayed without any modification. ``` cell = {'cell_type': 'code', 'source': '# exports\nfrom local.core import *'} test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'}) cell = {'cell_type': 'code', 'source': '# exports core\nfrom local.core import *'} test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'}) #export def treat_backticks(cell): "Add links to backticks words in `cell`" if cell['cell_type'] == 'markdown': cell['source'] = add_doc_links(cell['source']) return cell cell = {'cell_type': 'markdown', 'source': 'This is a `DocsTestClass`'} test_eq(treat_backticks(cell), {'cell_type': 'markdown', 'source': 'This is a [`DocsTestClass`](/export#DocsTestClass)'}) #export _re_nb_link = re.compile(r""" # Catches any link to a local notebook and keeps the title in group 1, the link without .ipynb in group 2 \[ # Opening [ ([^\]]*) # Catching group for any character except ] \]\( # Closing ], opening ( ([^http] # Catching group that must not begin by html (local notebook) [^\)]*) # and containing anything but ) .ipynb\) # .ipynb and closing ) """, re.VERBOSE) #export _re_block_notes = re.compile(r""" # Catches any pattern > Title: content with title in group 1 and content in group 2 ^\s*>\s* # > followed by any number of whitespace 
([^:]*) # Catching group for any character but : :\s* # : then any number of whitespace ([^\n]*) # Catching group for anything but a new line character (?:\n|$) # Non-catching group for either a new line or the end of the text """, re.VERBOSE | re.MULTILINE) #export def _to_html(text): return text.replace("'", "&#8217;") #export def add_jekyll_notes(cell): "Convert block quotes to jekyll notes in `cell`" styles = Config().get('jekyll_styles', 'note,warning,tip,important').split(',') def _inner(m): title,text = m.groups() if title.lower() not in styles: return f"> {title}:{text}" return '{% include '+title.lower()+".html content=\'"+_to_html(text)+"\' %}" if cell['cell_type'] == 'markdown': cell['source'] = _re_block_notes.sub(_inner, cell['source']) return cell ``` Supported styles are `Warning`, `Note` `Tip` and `Important`: Typing `> Warning: There will be no second warning!` will render in the docs: > Warning: There will be no second warning! Typing `> Important: Pay attention! It's important.` will render in the docs: > Important: Pay attention! It's important. Typing `> Tip: This is my tip.` will render in the docs: > Tip: This is my tip. Typing `> Note: Take note of this.` will render in the docs: > Note: Take note of this. Typing ``> Note: A doc link to `add_jekyll_notes` should also work fine.`` will render in the docs: > Note: A doc link to `add_jekyll_notes` should also work fine. ``` #hide for w in ['Warning', 'Note', 'Important', 'Tip', 'Bla']: cell = {'cell_type': 'markdown', 'source': f"> {w}: This is my final {w.lower()}!"} res = '{% include '+w.lower()+'.html content=\'This is my final '+w.lower()+'!\' %}' if w != 'Bla': test_eq(add_jekyll_notes(cell), {'cell_type': 'markdown', 'source': res}) else: test_eq(add_jekyll_notes(cell), cell) #hide cell = {'cell_type': 'markdown', 'source': f"> This is a link, don't break me! 
https://my.link.com"} test_eq(add_jekyll_notes(cell.copy()), cell) #export _re_image = re.compile(r""" # Catches any image file used, either with `![alt](image_file)` or `<img src="image_file">` ^(!\[ # Beginning of line (since re.MULTILINE is passed) followed by ![ in a catching group [^\]]* # Anything but ] \]\() # Closing ] and opening (, end of the first catching group ([^\)]*) # Catching block with any character but ) (\)) # Catching group with closing ) | # OR ^(<img\ [^>]*>) # Catching group with <img some_html_code> """, re.MULTILINE | re.VERBOSE) _re_image1 = re.compile(r"^<img\ [^>]*>", re.MULTILINE) #export def _img2jkl(d, h, jekyll=True): if not jekyll: return '<img ' + h.attrs2str() + '>' if 'width' in d: d['max-width'] = d.pop('width') if 'src' in d: d['file'] = d.pop('src') return '{% include image.html ' + h.attrs2str() + ' %}' #export def _is_real_image(src): return not (src.startswith('http://') or src.startswith('https://') or src.startswith('data:image/')) #export def copy_images(cell, fname, dest, jekyll=True): "Copy images referenced in `cell` from `fname` parent folder to `dest` folder" def _rep_src(m): grps = m.groups() if grps[3] is not None: h = HTMLParseAttrs() dic = h(grps[3]) src = dic['src'] else: cap = re.search(r'(\s"[^"]*")', grps[1]) if cap is not None: grps = (grps[0], re.sub(r'\s"[^"]*"', '', grps[1]), cap.groups()[0] + grps[2], grps[3]) src = grps[1] if _is_real_image(src): os.makedirs((Path(dest)/src).parent, exist_ok=True) shutil.copy(Path(fname).parent/src, Path(dest)/src) src = Config().doc_baseurl + src if grps[3] is not None: dic['src'] = src return _img2jkl(dic, h, jekyll=jekyll) else: return f"{grps[0]}{src}{grps[2]}" if cell['cell_type'] == 'markdown': cell['source'] = _re_image.sub(_rep_src, cell['source']) return cell ``` This is to ensure that all images defined in `nbs_folder/images` and used in notebooks are copied over to `doc_folder/images`. 
``` dest_img = Config().doc_path/'images'/'logo.png' cell = {'cell_type': 'markdown', 'source':'Text\n![Alt](images/logo.png)'} try: copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n![Alt](/images/logo.png)') #Image has been copied assert dest_img.exists() cell = {'cell_type': 'markdown', 'source':'Text\n![Alt](images/logo.png "caption")'} copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n![Alt](/images/logo.png "caption")') finally: dest_img.unlink() #hide cell = {'cell_type': 'markdown', 'source':'Text\n![Alt](https://site.logo.png)'} copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n![Alt](https://site.logo.png)') cell = {'cell_type': 'markdown', 'source':'Text\n![Alt](https://site.logo.png "caption")'} copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n![Alt](https://site.logo.png "caption")') #hide cell = {'cell_type': 'markdown', 'source': 'Text\n<img src="images/logo.png" alt="alt" width="600" caption="cap" />'} try: copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n{% include image.html alt="alt" caption="cap" max-width="600" file="/images/logo.png" %}') assert dest_img.exists() finally: dest_img.unlink() #hide cell = {'cell_type': 'markdown', 'source': 'Text\n<img src="http://site.logo.png" alt="alt" width="600" caption="cap" />'} copy_images(cell, Path('01_export.ipynb'), Config().doc_path) test_eq(cell["source"], 'Text\n{% include image.html alt="alt" caption="cap" max-width="600" file="http://site.logo.png" %}') #export def _relative_to(path1, path2): p1,p2 = Path(path1).absolute().parts,Path(path2).absolute().parts i=0 while i <len(p1) and i<len(p2) and p1[i] == p2[i]: i+=1 p1,p2 = p1[i:],p2[i:] return os.path.sep.join(['..' 
for _ in p2] + list(p1)) #hide test_eq(_relative_to(Path('images/logo.png'), Config().doc_path), '../nbs/images/logo.png') test_eq(_relative_to(Path('images/logo.png'), Config().doc_path.parent), 'nbs/images/logo.png') #export def adapt_img_path(cell, fname, dest, jekyll=True): "Adapt path of images referenced in `cell` from `fname` to work in folder `dest`" def _rep(m): gps = m.groups() if gps[0] is not None: start,img,end = gps[:3] if not (img.startswith('http:/') or img.startswith('https:/')): img = _relative_to(fname.parent/img, dest) return f'{start}{img}{end}' else: h = HTMLParseAttrs() dic = h(gps[3]) if not (dic['src'].startswith('http:/') or dic['src'].startswith('https:/')): dic['src'] = _relative_to(fname.parent/dic['src'], dest) return _img2jkl(dic, h, jekyll=jekyll) if cell['cell_type'] == 'markdown': cell['source'] = _re_image.sub(_rep, cell['source']) return cell ``` This function is slightly different as it ensures that a notebook convert to a file that will be placed in `dest` will have the images location updated. It is used for the `README.md` file (generated automatically from the index) since the images are copied inside the github repo, but in general, you should make sure your images are going to be accessible from the location your file ends up being. 
``` cell = {'cell_type': 'markdown', 'source': 'Text\n![Alt](images/logo.png)'} cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent) test_eq(cell1['source'], 'Text\n![Alt](nbs/images/logo.png)') cell = {'cell_type': 'markdown', 'source': 'Text\n![Alt](http://site.logo.png)'} cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent) test_eq(cell1['source'], 'Text\n![Alt](http://site.logo.png)') cell = {'cell_type': 'markdown', 'source': 'Text\n<img alt="Logo" src="images/logo.png" width="600"/>'} cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent) test_eq(cell1['source'], 'Text\n{% include image.html alt="Logo" max-width="600" file="nbs/images/logo.png" %}') cell = {'cell_type': 'markdown', 'source': 'Text\n<img alt="Logo" src="https://site.image.png" width="600"/>'} cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent) test_eq(cell1['source'], 'Text\n{% include image.html alt="Logo" max-width="600" file="https://site.image.png" %}') ``` Escape Latex in liquid ``` #export _re_latex = re.compile(r'^(\$\$.*\$\$)$', re.MULTILINE) #export def escape_latex(cell): if cell['cell_type'] != 'markdown': return cell cell['source'] = _re_latex.sub(r'{% raw %}\n\1\n{% endraw %}', cell['source']) return cell cell = {'cell_type': 'markdown', 'source': 'lala\n$$equation$$\nlala'} cell = escape_latex(cell) test_eq(cell['source'], 'lala\n{% raw %}\n$$equation$$\n{% endraw %}\nlala') ``` ### Collapsable Code Cells ``` #export #Matches any cell with #collapse or #collapse_hide _re_cell_to_collapse_closed = re.compile(r'^\s*#\s*(collapse|collapse_hide|collapse-hide)\s+') #Matches any cell with #collapse_show _re_cell_to_collapse_open = re.compile(r'^\s*#\s*(collapse_show|collapse-show)\s+') #export def collapse_cells(cell): "Add a collapse button to inputs of `cell` in either the open or closed position" if check_re(cell, _re_cell_to_collapse_closed): cell['metadata'] = 
{'collapse_hide': True} elif check_re(cell, _re_cell_to_collapse_open): cell['metadata'] = {'collapse_show': True} return cell ``` - Placing `#collapse_open` as a comment in a code cell will inlcude your code under a collapsable element that is **open** by default. ``` #collapse_open print('This code cell is not collapsed by default but you can collapse it to hide it from view!') print("Note that the output always shows with `#collapse`.") ``` - Placing `#collapse` or `#collapse_closed` will include your code in a collapsable element that is **closed** by default. For example: ``` #collapse print('The code cell that produced this output is collapsed by default but you can expand it!') ``` ### Preprocessing the list of cells The following functions are applied to the entire list of cells of the notebook as a preprocessing step before the conversion to html. ``` #export #Matches any cell with #hide or #default_exp or #default_cls_lvl or #exporti _re_cell_to_remove = re.compile(r'^\s*#\s*(hide\s|default_exp|default_cls_lvl|exporti|all_([^\s]*))\s*') #export def remove_hidden(cells): "Remove in `cells` the ones with a flag `#hide`, `#default_exp` or `#default_cls_lvl` or `#exporti`" return [c for c in cells if _re_cell_to_remove.search(c['source']) is None] cells = [{'cell_type': 'code', 'source': source} for source in [ '# export\nfrom local.core import *', '# hide\nfrom local.core import *', '#exports\nsuper code', '#default_exp notebook.export', 'show_doc(read_nb)', '#default_cls_lvl 3', '#all_slow', '# exporti\n1 + 1']] + [{'cell_type': 'markdown', 'source': source} for source in [ '#hide_input\nnice', '#hide\n\nto hide']] cells1 = remove_hidden(cells) test_eq(len(cells1), 4) test_eq(cells1[0], cells[0]) test_eq(cells1[1], cells[2]) test_eq(cells1[2], cells[4]) test_eq(cells1[3], cells[8]) #export _re_default_cls_lvl = re.compile(r""" ^ # Beginning of line (since re.MULTILINE is passed) \s*\#\s* # Any number of whitespace, #, any number of whitespace 
default_cls_lvl # default_cls_lvl \s* # Any number of whitespace (\d*) # Catching group for any number of digits \s*$ # Any number of whitespace and end of line (since re.MULTILINE is passed) """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export def find_default_level(cells): "Find in `cells` the default class level." for cell in cells: tst = check_re(cell, _re_default_cls_lvl) if tst: return int(tst.groups()[0]) return 2 tst_nb = read_nb('00_export.ipynb') test_eq(find_default_level(tst_nb['cells']), 3) #export #Find a cell with #export(s) _re_export = re.compile(r'^\s*#\s*exports?\s*', re.IGNORECASE | re.MULTILINE) _re_show_doc = re.compile(r""" # First one catches any cell with a #export or #exports, second one catches any show_doc and get the first argument in group 1 show_doc # show_doc \s*\(\s* # Any number of whitespace, opening (, any number of whitespace ([^,\)\s]*) # Catching group for any character but a comma, a closing ) or a whitespace [,\)\s] # A comma, a closing ) or a whitespace """, re.MULTILINE | re.VERBOSE) #export def _show_doc_cell(name, cls_lvl=None): return {'cell_type': 'code', 'execution_count': None, 'metadata': {}, 'outputs': [], 'source': f"show_doc({name}{'' if cls_lvl is None else f', default_cls_level={cls_lvl}'})"} def add_show_docs(cells, cls_lvl=None): "Add `show_doc` for each exported function or class" documented = [_re_show_doc.search(cell['source']).groups()[0] for cell in cells if cell['cell_type']=='code' and _re_show_doc.search(cell['source']) is not None] res = [] for cell in cells: res.append(cell) if check_re(cell, _re_export): names = export_names(cell['source'], func_only=True) for n in names: if n not in documented: res.append(_show_doc_cell(n, cls_lvl=cls_lvl)) return res ``` This only adds cells with a `show_doc` for non-documented functions, so if you add yourself a `show_doc` cell (because you want to change one of the default argument), there won't be any duplicates. 
``` for i,cell in enumerate(tst_nb['cells']): if cell['source'].startswith('#export\ndef read_nb'): break tst_cells = [c.copy() for c in tst_nb['cells'][i-1:i+1]] added_cells = add_show_docs(tst_cells, cls_lvl=3) test_eq(len(added_cells), 3) test_eq(added_cells[0], tst_nb['cells'][i-1]) test_eq(added_cells[1], tst_nb['cells'][i]) test_eq(added_cells[2], _show_doc_cell('read_nb', cls_lvl=3)) test_eq(added_cells[2]['source'], 'show_doc(read_nb, default_cls_level=3)') #Check show_doc isn't added if it was already there. tst_cells1 = [{'cell_type':'code', 'source': '#export\ndef my_func(x):\n return x'}, {'cell_type':'code', 'source': 'show_doc(my_func)'}] test_eq(add_show_docs(tst_cells1), tst_cells1) tst_cells1 = [{'cell_type':'code', 'source': '#export\ndef my_func(x):\n return x'}, {'cell_type':'markdown', 'source': 'Some text'}, {'cell_type':'code', 'source': 'show_doc(my_func, title_level=3)'}] test_eq(add_show_docs(tst_cells1), tst_cells1) #export _re_fake_header = re.compile(r""" # Matches any fake header (one that ends with -) \#+ # One or more # \s+ # One or more of whitespace .* # Any char -\s* # A dash followed by any number of white space $ # End of text """, re.VERBOSE) # export def remove_fake_headers(cells): "Remove in `cells` the fake header" return [c for c in cells if c['cell_type']=='code' or _re_fake_header.search(c['source']) is None] ``` You can fake headers in your notebook to navigate them more easily with collapsible headers, just make them finish with a dash and they will be removed. One typicl use case is to have a header of level 2 with the name of a class, since the `show_doc` cell of that class will create the same anchor, you need to have the one you created manually disappear to avoid any duplicate. 
``` cells = [{'cell_type': 'markdown', 'metadata': {}, 'source': '### Fake-'}] + tst_nb['cells'][:10] cells1 = remove_fake_headers(cells) test_eq(len(cells1), len(cells)-1) test_eq(cells1[0], cells[1]) # export def remove_empty(cells): "Remove in `cells` the empty cells" return [c for c in cells if len(c['source']) >0] ``` ### Grabbing metada ``` #export _re_title_summary = re.compile(r""" # Catches the title and summary of the notebook, presented as # Title > summary, with title in group 1 and summary in group 2 ^\s* # Beginning of text followe by any number of whitespace \#\s+ # # followed by one or more of whitespace ([^\n]*) # Catching group for any character except a new line \n+ # One or more new lines >[ ]* # > followed by any number of whitespace ([^\n]*) # Catching group for any character except a new line """, re.VERBOSE) _re_properties = re.compile(r""" ^-\s+ # Beginnig of a line followed by - and at least one space (.*?) # Any pattern (shortest possible) \s*:\s* # Any number of whitespace, :, any number of whitespace (.*?)$ # Any pattern (shortest possible) then end of line """, re.MULTILINE | re.VERBOSE) # export def get_metadata(cells): "Find the cell with title and summary in `cells`." for i,cell in enumerate(cells): if cell['cell_type'] == 'markdown': match = _re_title_summary.match(cell['source']) if match: cells.pop(i) attrs = {k:v for k,v in _re_properties.findall(cell['source'])} return {'keywords': 'fastai', 'summary' : match.groups()[1], 'title' : match.groups()[0], **attrs} return {'keywords': 'fastai', 'summary' : 'summary', 'title' : 'Title'} ``` In the markdown cell with the title, you can add the summary as a block quote (just put an empty block quote for an empty summary) and a list with any additional metada you would like to add, for instance: ``` # Title > Awesome summary - toc: False ``` The toc: False metadata will prevent the table of contents from showing on the page. 
``` tst_nb = read_nb('00_export.ipynb') test_eq(get_metadata(tst_nb['cells']), { 'keywords': 'fastai', 'summary': 'The functions that transform notebooks in a library', 'title': 'Export to modules'}) #The cell with the metada is popped out, so if we do it a second time we get the default. test_eq(get_metadata(tst_nb['cells']), {'keywords': 'fastai', 'summary' : 'summary', 'title' : 'Title'}) #hide cells = [{'cell_type': 'markdown', 'source': "# Title\n\n> s\n\n- toc: false"}] test_eq(get_metadata(cells), {'keywords': 'fastai', 'summary': 's', 'title': 'Title', 'toc': 'false'}) ``` ## Executing show_doc cells ``` # export _re_cell_to_execute = ReLibName(r"^\s*show_doc\(([^\)]*)\)|^from LIB_NAME\.", re.MULTILINE) # export class ExecuteShowDocPreprocessor(ExecutePreprocessor): "An `ExecutePreprocessor` that only executes `show_doc` and `import` cells" def preprocess_cell(self, cell, resources, index): if 'source' in cell and cell['cell_type'] == "code": if _re_cell_to_execute.re.search(cell['source']): return super().preprocess_cell(cell, resources, index) return cell, resources # export def _import_show_doc_cell(mod=None): "Add an import show_doc cell." 
source = f"#export\nfrom nbdev.showdoc import show_doc" if mod: source += f"\nfrom {Config().lib_name}.{mod} import *" return {'cell_type': 'code', 'execution_count': None, 'metadata': {'hide_input': True}, 'outputs': [], 'source': source} def execute_nb(nb, mod=None, metadata=None, show_doc_only=True): "Execute `nb` (or only the `show_doc` cells) with `metadata`" nb['cells'].insert(0, _import_show_doc_cell(mod)) ep_cls = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor ep = ep_cls(timeout=600, kernel_name='python3') metadata = metadata or {} pnb = nbformat.from_dict(nb) ep.preprocess(pnb, metadata) return pnb ``` ## Converting bibtex citations ``` #export _re_cite = re.compile(r"(\\cite{)([^}]*)(})", re.MULTILINE | re.VERBOSE) # Catches citations used with `\cite{}` #export def _textcite2link(text): citations = _re_cite.finditer(text) out = [] start_pos = 0 for cit_group in citations: cit_pos_st = cit_group.span()[0] cit_pos_fin = cit_group.span()[1] out.append(text[start_pos:cit_pos_st]) out.append('[') cit_group = cit_group[2].split(',') for i, cit in enumerate(cit_group): cit=cit.strip() out.append(f"""<a class="latex_cit" id="call-{cit}" href="#cit-{cit}">{cit}</a>""") if i != len(cit_group) - 1: out.append(',') out.append(']') start_pos = cit_pos_fin out.append(text[start_pos:]) return ''.join(out) #export def cite2link(cell): '''Creates links from \cite{} to Refenrence section generated by jupyter_latex_envs''' if cell['cell_type'] == 'markdown': cell['source'] = _textcite2link(cell['source']) return cell ``` jupyter_latex_envs is a jupyter extension https://github.com/jfbercher/jupyter_latex_envs. You can find relevant section [here](https://rawgit.com/jfbercher/jupyter_latex_envs/master/src/latex_envs/static/doc/latex_env_doc.html#Bibliography) Note, that nbdev now only supports `\cite{}` conversion and not the rest, e.g., `\figure{}` and so on. 
``` #hide cell = {'cell_type': 'markdown', 'source': r"""This is cited multireference \cite{Frob1, Frob3}. And single \cite{Frob2}."""} expected=r"""This is cited multireference [<a class="latex_cit" id="call-Frob1" href="#cit-Frob1">Frob1</a>,<a class="latex_cit" id="call-Frob3" href="#cit-Frob3">Frob3</a>]. And single [<a class="latex_cit" id="call-Frob2" href="#cit-Frob2">Frob2</a>].""" test_eq(cite2link(cell)["source"], expected) ``` It's important to execute all `show_doc` cells before exporting the notebook to html because some of them have just been added automatically or others could have outdated links. ``` fake_nb = {k:v for k,v in tst_nb.items() if k != 'cells'} fake_nb['cells'] = [tst_nb['cells'][0].copy()] + added_cells fake_nb = execute_nb(fake_nb, mod='export') assert len(fake_nb['cells'][-1]['outputs']) > 0 ``` ## Filling templates The following functions automatically adds jekyll templates if they are misssing. ``` #export def write_tmpl(tmpl, nms, cfg, dest): "Write `tmpl` to `dest` (if missing) filling in `nms` in template using dict `cfg`" if dest.exists(): return vs = {o:cfg.d[o] for o in nms.split()} outp = tmpl.format(**vs) dest.write_text(outp) #export def write_tmpls(): "Write out _config.yml and _data/topnav.yml using templates" cfg = Config() write_tmpl(config_tmpl, 'user lib_name title copyright description', cfg, cfg.doc_path/'_config.yml') write_tmpl(topnav_tmpl, 'user lib_name', cfg, cfg.doc_path/'_data'/'topnav.yml') write_tmpl(makefile_tmpl, 'nbs_path lib_name', cfg, cfg.config_file.parent/'Makefile') ``` ## Conversion ``` __file__ = Config().lib_path/'export2html.py' # export def nbdev_exporter(cls=HTMLExporter, template_file=None): cfg = traitlets.config.Config() exporter = cls(cfg) exporter.exclude_input_prompt=True exporter.exclude_output_prompt=True exporter.anchor_link_text = ' ' exporter.template_file = 'jekyll.tpl' if template_file is None else template_file 
exporter.template_path.append(str(Path(__file__).parent/'templates')) return exporter # export process_cells = [remove_fake_headers, remove_hidden, remove_empty] process_cell = [hide_cells, collapse_cells, remove_widget_state, add_jekyll_notes, escape_latex, cite2link] # export _re_digits = re.compile(r'^\d+\S*?_') #export def _nb2htmlfname(nb_path, dest=None): if dest is None: dest = Config().doc_path return Path(dest)/_re_digits.sub('', nb_path.with_suffix('.html').name) #hide test_eq(_nb2htmlfname(Path('00a_export.ipynb')), Config().doc_path/'export.html') test_eq(_nb2htmlfname(Path('export.ipynb')), Config().doc_path/'export.html') test_eq(_nb2htmlfname(Path('00ab_export_module_1.ipynb')), Config().doc_path/'export_module_1.html') test_eq(_nb2htmlfname(Path('export.ipynb'), '.'), Path('export.html')) # export def convert_nb(fname, cls=HTMLExporter, template_file=None, exporter=None, dest=None): "Convert a notebook `fname` to html file in `dest_path`." fname = Path(fname).absolute() nb = read_nb(fname) meta_jekyll = get_metadata(nb['cells']) meta_jekyll['nb_path'] = str(fname.relative_to(Config().lib_path.parent)) cls_lvl = find_default_level(nb['cells']) mod = find_default_export(nb['cells']) nb['cells'] = compose(*process_cells,partial(add_show_docs, cls_lvl=cls_lvl))(nb['cells']) _func = compose(partial(copy_images, fname=fname, dest=Config().doc_path), *process_cell, treat_backticks) nb['cells'] = [_func(c) for c in nb['cells']] nb = execute_nb(nb, mod=mod) nb['cells'] = [clean_exports(c) for c in nb['cells']] if exporter is None: exporter = nbdev_exporter(cls=cls, template_file=template_file) with open(_nb2htmlfname(fname, dest=dest),'w') as f: f.write(exporter.from_notebook_node(nb, resources=meta_jekyll)[0]) # export def _notebook2html(fname, cls=HTMLExporter, template_file=None, exporter=None, dest=None): time.sleep(random.random()) print(f"converting: {fname}") try: convert_nb(fname, cls=cls, template_file=template_file, exporter=exporter, dest=dest) 
return True except Exception as e: print(e) return False # export def notebook2html(fname=None, force_all=False, n_workers=None, cls=HTMLExporter, template_file=None, exporter=None, dest=None): "Convert all notebooks matching `fname` to html files" if fname is None: files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')] else: p = Path(fname) files = list(p.parent.glob(p.name)) if len(files)==1: force_all = True if n_workers is None: n_workers=0 if not force_all: # only rebuild modified files files,_files = [],files.copy() for fname in _files: fname_out = _nb2htmlfname(Path(fname).absolute(), dest=dest) if not fname_out.exists() or os.path.getmtime(fname) >= os.path.getmtime(fname_out): files.append(fname) if len(files)==0: print("No notebooks were modified") else: passed = parallel(_notebook2html, files, n_workers=n_workers, cls=cls, template_file=template_file, exporter=exporter, dest=dest) if not all(passed): msg = "Conversion failed on the following:\n" raise Exception(msg + '\n'.join([f.name for p,f in zip(passed,files) if not p])) #hide # Test when an argument is given to notebook2html p1 = Path('/tmp/sync.html') if p1.exists(): p1.unlink() notebook2html('01_sync.ipynb', dest='/tmp'); assert p1.exists() # Test when no argument is given to notebook2html dest_files = [_nb2htmlfname(f, dest='/tmp') for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')] [f.unlink() for f in dest_files if f.exists()] notebook2html(fname=None, dest='/tmp'); assert all([f.exists() for f in dest_files]) # Test Error handling try: notebook2html('../README.md'); except Exception as e: assert True else: assert False, 'An error should be raised when a non-notebook file is passed to notebook2html!' ``` Hide cells starting with `#export` and only leaves the prose and the tests. If `fname` is not specified, this will convert all notebooks not beginning with an underscore in the `nb_folder` defined in `setting.ini`. 
Otherwise `fname` can be a single filename or a glob expression. By default, only the notebooks that are more recent than their html counterparts are modified, pass `force_all=True` to change that behavior. ``` #hide #notebook2html(force_all=True) # export def convert_md(fname, dest_path, img_path='docs/images/', jekyll=True): "Convert a notebook `fname` to a markdown file in `dest_path`." fname = Path(fname).absolute() if not img_path: img_path = fname.stem + '_files/' Path(img_path).mkdir(exist_ok=True, parents=True) nb = read_nb(fname) meta_jekyll = get_metadata(nb['cells']) try: meta_jekyll['nb_path'] = str(fname.relative_to(Config().lib_path.parent)) except: meta_jekyll['nb_path'] = str(fname) nb['cells'] = compose(*process_cells)(nb['cells']) nb['cells'] = [compose(partial(adapt_img_path, fname=fname, dest=dest_path, jekyll=jekyll), *process_cell)(c) for c in nb['cells']] fname = Path(fname).absolute() dest_name = fname.with_suffix('.md').name exp = nbdev_exporter(cls=MarkdownExporter, template_file='jekyll-md.tpl' if jekyll else 'md.tpl') export = exp.from_notebook_node(nb, resources=meta_jekyll) md = export[0] for ext in ['png', 'svg']: md = re.sub(r'!\['+ext+'\]\((.+)\)', '!['+ext+'](' + img_path + '\\1)', md) with (Path(dest_path)/dest_name).open('w') as f: f.write(md) for n,o in export[1]['outputs'].items(): with open(Path(dest_path)/img_path/n, 'wb') as f: f.write(o) ``` This is used to convert the index into the `README.md`. 
``` #hide try: convert_md('index.ipynb', Path('.').absolute().parent, jekyll=False) finally: (Path('.').absolute().parent/'index.md').unlink() #export _re_att_ref = re.compile(r' *!\[(.*)\]\(attachment:image.png(?: "(.*)")?\)') t = '![screenshot](attachment:image.png)' test_eq(_re_att_ref.match(t).groups(), ('screenshot', None)) t = '![screenshot](attachment:image.png "Deploying to Binder")' test_eq(_re_att_ref.match(t).groups(), ('screenshot', "Deploying to Binder")) #export try: from PIL import Image except: pass # Only required for _update_att_ref #export _tmpl_img = '<img alt="{title}" width="{width}" caption="{title}" id="{id}" src="{name}">' def _update_att_ref(line, path, img): m = _re_att_ref.match(line) if not m: return line alt,title = m.groups() w = img.size[0] if alt=='screenshot': w //= 2 if not title: title = "TK: add title" return _tmpl_img.format(title=title, width=str(w), id='TK: add it', name=str(path)) #export def _nb_detach_cell(cell, dest, use_img): att,src = cell['attachments'],cell['source'] mime,img = first(first(att.values()).items()) ext = mime.split('/')[1] for i in range(99999): p = dest/(f'att_{i:05d}.{ext}') if not p.exists(): break img = b64decode(img) p.write_bytes(img) del(cell['attachments']) if use_img: return [_update_att_ref(o,p,Image.open(p)) for o in src] else: return [o.replace('attachment:image.png', str(p)) for o in src] #export def nb_detach_cells(path_nb, dest=None, replace=True, use_img=False): "Export cell attachments to `dest` and update references" path_nb = Path(path_nb) if not dest: dest = f'{path_nb.stem}_files' dest = Path(dest) dest.mkdir(exist_ok=True, parents=True) j = json.load(path_nb.open()) atts = [o for o in j['cells'] if 'attachments' in o] for o in atts: o['source'] = _nb_detach_cell(o, dest, use_img) if atts and replace: json.dump(j, path_nb.open('w')) if not replace: return j ``` ## Sidebar ``` #export import time,random,warnings #export def _leaf(k,v): url = 'external_url' if "http" in v else 'url' 
#if url=='url': v=v+'.html' return {'title':k, url:v, 'output':'web,pdf'} #export _k_names = ['folders', 'folderitems', 'subfolders', 'subfolderitems'] def _side_dict(title, data, level=0): k_name = _k_names[level] level += 1 res = [(_side_dict(k, v, level) if isinstance(v,dict) else _leaf(k,v)) for k,v in data.items()] return ({k_name:res} if not title else res if title.startswith('empty') else {'title': title, 'output':'web', k_name: res}) #export _re_catch_title = re.compile('^title\s*:\s*(\S+.*)$', re.MULTILINE) #export def _get_title(fname): "Grabs the title of html file `fname`" with open(fname, 'r') as f: code = f.read() src = _re_catch_title.search(code) return fname.stem if src is None else src.groups()[0] #hide test_eq(_get_title(Config().doc_path/'export.html'), "Export to modules") #export def create_default_sidebar(): "Create the default sidebar for the docs website" dic = {"Overview": "/"} files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')] fnames = [_nb2htmlfname(f) for f in sorted(files)] titles = [_get_title(f) for f in fnames if 'index' not in f.stem!='index'] if len(titles) > len(set(titles)): print(f"Warning: Some of your Notebooks use the same title ({titles}).") dic.update({_get_title(f):f'/{f.stem}' for f in fnames if f.stem!='index'}) dic = {Config().lib_name: dic} json.dump(dic, open(Config().doc_path/'sidebar.json', 'w'), indent=2) ``` The default sidebar lists all html pages with their respective title, except the index that is named "Overview". To build a custom sidebar, set the flag `custom_sidebar` in your `settings.ini` to `True` then change the `sidebar.json` file in the `doc_folder` to your liking. Otherwise, the sidebar is updated at each doc build. 
``` #hide #create_default_sidebar() #export def make_sidebar(): "Making sidebar for the doc website form the content of `doc_folder/sidebar.json`" if not (Config().doc_path/'sidebar.json').exists() or Config().custom_sidebar == 'False': create_default_sidebar() sidebar_d = json.load(open(Config().doc_path/'sidebar.json', 'r')) res = _side_dict('Sidebar', sidebar_d) res = {'entries': [res]} res_s = yaml.dump(res, default_flow_style=False) res_s = res_s.replace('- subfolders:', ' subfolders:').replace(' - - ', ' - ') res_s = f""" ################################################# ### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ### ################################################# # Instead edit {'../../sidebar.json'} """+res_s open(Config().doc_path/'_data/sidebars/home_sidebar.yml', 'w').write(res_s) ``` ## Export- ``` #hide notebook2script() ```
github_jupyter
# Quantum Autoencoder <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em> ## Overview This tutorial will show how to train a quantum autoencoder to compress and reconstruct a given quantum state (mixed state) [1]. ### Theory The form of the quantum autoencoder is very similar to the classical autoencoder, which is composed of an encoder $E$ and a decoder $D$. For the input quantum state $\rho_{in}$ of the $N$ qubit system (here we use the density operator representation of quantum mechanics to describe the mixed state), first use the encoder $E = U(\theta)$ to encode information into some of the qubits in the system. This part of qubits is denoted by **system $A$**. After measuring and discarding the remaining qubits (this part is denoted by **system $B$**), we get the compressed quantum state $\rho_{encode}$! The dimension of the compressed quantum state is the same as the dimension of the quantum system $A$. Suppose we need $N_A$ qubits to describe the system $A$, then the dimension of the encoded quantum state $\rho_{encode}$ is $2^{N_A}\times 2^{N_A}$. Note that the mathematical operation corresponding to the measure-and-discard operation in this step is partial trace. The reader can intuitively treat it as the inverse operation of the tensor product $\otimes$. Let us look at a specific example. Given a quantum state $\rho_A$ of $N_A$ qubits and another quantum state $\rho_B$ of $N_B$ qubits, the quantum state of the entire quantum system composed of subsystems $A$ and $B$ is $\rho_{AB} = \rho_A \otimes \rho_B$, which is a state of $N = N_A + N_B$ qubits. Now we let the entire quantum system evolve under the action of the unitary matrix $U$ for some time to get a new quantum state $\tilde{\rho_{AB}} = U\rho_{AB}U^\dagger$. So if we only want to get the new quantum state $\tilde{\rho_A}$ of quantum subsystem A at this time, what should we do? We simply measure the quantum subsystem $B$ and then discard it. 
This step of the operation is completed by partial trace $\tilde{\rho_A} = \text{Tr}_B (\tilde{\rho_{AB}})$. With Paddle Quantum, we can call the built-in function `partial_trace(rho_AB, 2**N_A, 2**N_B, 2)` to complete this operation. **Note:** The last parameter is 2, which means that we want to discard quantum system $B$. ![QA-fig-encoder_pipeline](./figures/QA-fig-encoder_pipeline.png) After discussing the encoding process, let us take a look at how decoding is done. To decode the quantum state $\rho_{encode}$, we need to introduce an ancillary system $C$ with the same dimension as the system $B$ and take its initial state as the $|0\dots0\rangle$ state. Then use the decoder $D = U^\dagger(\theta)$ to act on the entire quantum system $A+C$ to decode the compressed information in system A. We hope that the final quantum state $\rho_{out}$ and $\rho_{in}$ are as similar as possible and use Uhlmann-Josza fidelity $F$ to measure the similarity between them. $$ F(\rho_{in}, \rho_{out}) = \left(\operatorname{tr} \sqrt{\sqrt{\rho_{in}} \rho_{out} \sqrt{\rho_{in}}} \right)^{2}. \tag{1} $$ Finally, by optimizing the encoder's parameters, we can improve the fidelity of $\rho_{in}$ and $\rho_{out}$ as much as possible. ## Paddle Quantum Implementation Next, we will use a simple example to show the workflow of the quantum autoencoder. Here we first import the necessary packages. ``` from IPython.core.display import HTML display(HTML("<style>pre { white-space: pre !important; }</style>")) import numpy as np from numpy import diag import scipy import scipy.stats import paddle from paddle import matmul, trace, kron, real from paddle_quantum.circuit import UAnsatz from paddle_quantum.utils import dagger, state_fidelity, partial_trace ``` ### Generating the initial state Let us consider the quantum state $\rho_{in}$ of $N = 3$ qubits. We first encode the information into the two qubits below (system $A$) through the encoder then measure and discard the first qubit (system $B$). 
Secondly, we introduce another qubit (the new reference system $C$) in state $|0\rangle$ to replace the discarded qubit $B$. Finally, through the decoder, the compressed information in A is restored to $\rho_{out}$. Here, we assume that the initial state is a mixed state and the spectrum of $\rho_{in}$ is $\lambda_i \in \{0.4, 0.2, 0.2, 0.1, 0.1, 0, 0, 0\}$, and then generate the initial state $\rho_{in}$ by applying a random unitary transformation. ``` N_A = 2 # Number of qubits in system A N_B = 1 # Number of qubits in system B N = N_A + N_B # Total number of qubits scipy.random.seed(1) # Fixed random seed V = scipy.stats.unitary_group.rvs(2**N) # Generate a random unitary matrix D = diag([0.4, 0.2, 0.2, 0.1, 0.1, 0, 0, 0]) # Enter the spectrum of the target state rho V_H = V.conj().T # Apply Hermitian transpose rho_in = (V @ D @ V_H).astype('complex128') # Generate rho_in # Initialize the quantum system C rho_C = np.diag([1,0]).astype('complex128') ``` ### Building a quantum neural network Here, we use quantum neural networks (QNN) as encoders and decoders. Suppose system A has $N_A$ qubits, both system $B$ and $C$ have $N_B$ qubits, and the depth of the QNN is $D$. Encoder $E$ acts on the total system composed of systems A and B, and decoder $D$ acts on the total system composed of $A$ and $C$. In this example, $N_{A} = 2$ and $N_{B} = 1$. 
``` # Set circuit parameters cir_depth = 6 # Circuit depth block_len = 2 # The length of each block theta_size = N*block_len*cir_depth # The size of the circuit parameter theta # Build the encoder E def Encoder(theta): # Initialize the network with UAnsatz cir = UAnsatz(N) # Build the network by layers for layer_num in range(cir_depth): for which_qubit in range(N): cir.ry(theta[block_len*layer_num*N + which_qubit], which_qubit) cir.rz(theta[(block_len*layer_num + 1)*N+ which_qubit], which_qubit) for which_qubit in range(N-1): cir.cnot([which_qubit, which_qubit + 1]) cir.cnot([N-1, 0]) return cir ``` ### Configuring the training model: loss function Here, we define the loss function to be $$ Loss = 1-\langle 0...0|\rho_{trash}|0...0\rangle, \tag{2} $$ where $\rho_{trash}$ is the quantum state of the system $B$ discarded after encoding. Then we train the QNN through PaddlePaddle to minimize the loss function. If the loss function reaches 0, the input state and output state will be exactly the same state. This means that we have achieved compression and decompression perfectly, in which case the fidelity of the initial and final states is $F(\rho_{in}, \rho_{out}) = 1$. 
``` # Set hyper-parameters N_A = 2 # Number of qubits in system A N_B = 1 # Number of qubits in system B N = N_A + N_B # Total number of qubits LR = 0.2 # Set the learning rate ITR = 100 # Set the number of iterations SEED = 15 # Fixed random number seed for initializing parameters class NET(paddle.nn.Layer): def __init__(self, shape, dtype='float64'): super(NET, self).__init__() # Convert Numpy array to Tensor supported in PaddlePaddle self.rho_in = paddle.to_tensor(rho_in) self.rho_C = paddle.to_tensor(rho_C) self.theta = self.create_parameter(shape=shape, default_initializer=paddle.nn.initializer.Uniform(low=0.0, high=2 * np.pi), dtype=dtype, is_bias=False) # Define loss function and forward propagation mechanism def forward(self): # Generate initial encoder E and decoder D cir = Encoder(self.theta) E = cir.U E_dagger = dagger(E) D = E_dagger D_dagger = E # Encode the quantum state rho_in rho_BA = matmul(matmul(E, self.rho_in), E_dagger) # Take partial_trace() to get rho_encode and rho_trash rho_encode = partial_trace(rho_BA, 2 ** N_B, 2 ** N_A, 1) rho_trash = partial_trace(rho_BA, 2 ** N_B, 2 ** N_A, 2) # Decode the quantum state rho_out rho_CA = kron(self.rho_C, rho_encode) rho_out = matmul(matmul(D, rho_CA), D_dagger) # Calculate the loss function with rho_trash zero_Hamiltonian = paddle.to_tensor(np.diag([1,0]).astype('complex128')) loss = 1 - real(trace(matmul(zero_Hamiltonian, rho_trash))) return loss, self.rho_in, rho_out, cir paddle.seed(SEED) # Generate network net = NET([theta_size]) # Generally speaking, we use Adam optimizer to get relatively good convergence # Of course, it can be changed to SGD or RMS prop. 
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters()) # Optimization loops for itr in range(1, ITR + 1): # Forward propagation for calculating loss function loss, rho_in, rho_out, cir = net() # Use back propagation to minimize the loss function loss.backward() opt.minimize(loss) opt.clear_grad() # Calculate and print fidelity fid = state_fidelity(rho_in.numpy(), rho_out.numpy()) if itr% 10 == 0: print('iter:', itr,'loss:','%.4f'% loss,'fid:','%.4f'% np.square(fid)) if itr == ITR: print("\nThe trained circuit:") print(cir) ``` If the dimension of system A is denoted by $d_A$, it is easy to prove that the maximum fidelity can be achieved by quantum autoencoder is the sum of $d_A$ largest eigenvalues ​​of $\rho_{in}$. In our case $d_A = 4$ and the maximum fidelity is $$ F_{\text{max}}(\rho_{in}, \rho_{out}) = \sum_{j=1}^{d_A} \lambda_j(\rho_{in})= 0.4 + 0.2 + 0.2 + 0.1 = 0.9. \tag{3} $$ After 100 iterations, the fidelity achieved by the quantum autoencoder we trained reaches above 0.89, which is very close to the optimal value. _______ ## References [1] Romero, J., Olson, J. P. & Aspuru-Guzik, A. Quantum autoencoders for efficient compression of quantum data. [Quantum Sci. Technol. 2, 045001 (2017).](https://iopscience.iop.org/article/10.1088/2058-9565/aa8072)
github_jupyter
<a href="https://colab.research.google.com/github/yukinaga/bert_nlp/blob/main/section_4/02_fine_tuning_for_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## ファインチューニングによる感情分析 ファインチューニングを活用し、文章の好悪感情を判別できるようにモデルを訓練します。 ## ライブラリのインストール ライブラリTransformers、およびnlpをインストールします。 ``` !pip install transformers !pip install nlp ``` ## モデルとTokenizerの読み込み 事前学習済みのモデルと、これと紐づいたTokenizerを読み込みます。 ``` from transformers import BertForSequenceClassification, BertTokenizerFast sc_model = BertForSequenceClassification.from_pretrained("bert-base-uncased") sc_model.cuda() tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") ``` ## データセットの読み込み ライブラリnlpを使用して、IMDbデータセットを読み込みます。 IMDbデータセットは、25000の映画レビューコメントに、ポジティブかネガティブの好悪感情を表すラベルが付随した、感情分析用のデータセットです。 https://www.imdb.com/interfaces/ 読み込んだIMDbのデータはトークナイザーで処理し、形式を整えます。 ``` from nlp import load_dataset def tokenize(batch): return tokenizer(batch["text"], padding=True, truncation=True) train_data, test_data = load_dataset("imdb", split=["train", "test"]) print(train_data["label"][0], train_data["text"][0]) # 好意的なコメント print(train_data["label"][20000], train_data["text"][20000]) # 否定的なコメント train_data = train_data.map(tokenize, batched=True, batch_size=len(train_data)) train_data.set_format("torch", columns=["input_ids", "attention_mask", "label"]) test_data = test_data.map(tokenize, batched=True, batch_size=len(train_data)) test_data.set_format("torch", columns=["input_ids", "attention_mask", "label"]) ``` ## 評価用の関数 `sklearn.metrics`を使用し、モデルを評価するための関数を定義します。 ``` from sklearn.metrics import accuracy_score def compute_metrics(result): labels = result.label_ids preds = result.predictions.argmax(-1) acc = accuracy_score(labels, preds) return { "accuracy": acc, } ``` ## Trainerの設定 Trainerクラス、およびTrainingArgumentsクラスを使用して、訓練を行うTrainerの設定を行います。 https://huggingface.co/transformers/main_classes/trainer.html 
https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments ``` from transformers import Trainer, TrainingArguments training_args = TrainingArguments( output_dir = "./results", num_train_epochs = 1, per_device_train_batch_size = 8, per_device_eval_batch_size = 32, per_gpu_train_batch_size = 8, warmup_steps = 500, # 学習係数が0からこのステップ数で上昇 weight_decay = 0.01, # 重みの減衰率 # evaluate_during_training = True, # ここの記述はバージョンによっては必要ありません logging_dir = "./logs", ) trainer = Trainer( model = sc_model, args = training_args, compute_metrics = compute_metrics, train_dataset = train_data, eval_dataset = test_data ) ``` ## モデルの訓練 設定に基づきモデルを訓練します。 ``` trainer.train() ``` ## モデルの評価 Trainerの`evaluate()`メソッドによりモデルを評価します。 ``` trainer.evaluate() ``` ## TensorBoardによる結果の表示 TensorBoardを使って、logsフォルダに格納された学習過程を表示します。 ``` %load_ext tensorboard %tensorboard --logdir logs ```
github_jupyter
### Why ibis? For me, it is mainly about achieving higher performance when handling large data already residing in databases. ibis uses the actual database's compute resources. In contrast, pandas' `read_sql` uses just your PC's resources. It is well known by Python ETL developers that pandas is slow for retrieving records from SQL databases. > High performance execution: Execute at the speed of your backend, not your local computer However, ibis is not without limitations either. Biggest drawback for wider ibis adoption is that it does not support your typical enterprise database platforms, namely IBM DB2 and Microsoft SQL Server (they are working on SQL Server support). These 2 database platforms are widely used at my company. However, it does support PostgreSQL, which IT has recently begun to support. The other minor drawback is it is yet another API syntax that a user will have to learn, although ibis has adopted some of pandas dataframe syntax and SQL's syntax. But as a result, it has a much more feature rich [API](http://ibis-project.org/docs/api.html). So if you are a seasoned pandas and SQL developer, it is relatively easy to get up to speed with ibis. View ibis home [page](http://ibis-project.org/) for other reasons why you may consider ibis. 
``` import ibis import os import pandas as pd import psycopg2 ibis.options.interactive = True ``` #### Server details ``` host = 'some_host' port = '5432' db = 'some_db' user = os.environ['some_user'] pwd = os.environ['some_pwd'] ``` #### Define ibis connection object ``` conn = ibis.postgres.connect( url=f'postgresql://{user}:{pwd}@{host}:{port}/{db}' ) ``` #### `conn` object has useful methods ``` conn.list_tables() ``` #### Let's time how long it takes to query a 30K+ row table ``` %%timeit associates = conn.table('associate_master') associates = conn.table('associate_master') ``` #### Number of rows in the associate master table ``` associates.count() ``` #### `associates` is an ibis table expression ``` type(associates) ``` #### We can save it as a pandas dataframe using execute() method ``` df = associates.execute(limit=40000) type(df) df.shape ``` Since ibis objects don't have built-in mechanism to plot your data and pandas does, it is nice to have the ability to convert an ibis table to a pandas dataframe. #### Let's see how long it would take using normal SQL using psycopg2 library ``` %%timeit with psycopg2.connect(host=host, port=port, database=db, user=user, password=pwd) as conn: sql = "select * from public.associate_master" df = pd.read_sql(sql, conn) df.shape ``` On average, pandas' `read_sql` takes several seconds to retrieve 30K rows of data. ibis on average took only microseconds! #### What about ORMs like sqlalchemy? It is common knowledge ORMs are not performant either, but if you want to be convinced... ``` from sqlalchemy import create_engine engine = create_engine(f'postgresql://{user}:{pwd}@{host}:{port}/{db}') ``` **With chunking:** ``` %%timeit df_orm = pd.read_sql_table('associate_master', con=engine, schema='public', chunksize=10000) ``` **Without chunking:** ``` %%timeit df_orm = pd.read_sql_table('associate_master', con=engine, schema='public') ``` Using ORM, it takes several seconds as well.
github_jupyter
# Building, Training and Evaluating Models with TensorFlow Decision Forests ## Overview In this lab, you use TensorFlow Decision Forests (TF-DF) library for the training, evaluation, interpretation and inference of Decision Forest models. ## Learning Objective In this notebook, you learn how to: 1. Train a binary classification Random Forest on a dataset containing numerical, categorical and missing features. 2. Evaluate the model on a test dataset and prepare the model for [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving). 3. Examine the overall structure of the model and the importance of each feature. 4. Re-train the model with a different learning algorithm (Gradient Boosted Decision Trees) and use a different set of input features. 5. Change the hyperparameters of the model. 6. Preprocess the features and train a model for regression. 7. Train a model for ranking. ## Introduction This tutorial shows how to use TensorFlow Decision Forests (TF-DF) library for the training, evaluation, interpretation and inference of Decision Forest models. Decision Forests (DF) are a large family of Machine Learning algorithms for supervised classification, regression and ranking. As the name suggests, DFs use decision trees as a building block. Today, the two most popular DF training algorithms are [Random Forests](https://en.wikipedia.org/wiki/Random_forest) and [Gradient Boosted Decision Trees](https://en.wikipedia.org/wiki/Gradient_boosting). Both algorithms are ensemble techniques that use multiple decision trees, but differ on how they do it. Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/train_models_with_tensorFlow_decision_forests.ipynb). ## Installing TensorFlow Decision Forests Install TF-DF by running the following cell. 
``` # Install the specified package !pip install tensorflow_decision_forests ``` **Please ignore incompatible errors.** Install [Wurlitzer](https://pypi.org/project/wurlitzer/) to display the detailed training logs. This is only needed in colabs. ``` # Install the specified package !pip install wurlitzer ``` ## Importing libraries ``` # Import necessary libraries import tensorflow_decision_forests as tfdf import os import numpy as np import pandas as pd import tensorflow as tf import math try: from wurlitzer import sys_pipes except: from colabtools.googlelog import CaptureLog as sys_pipes from IPython.core.magic import register_line_magic from IPython.display import Javascript ``` The hidden code cell limits the output height in colab. ``` # Some of the model training logs can cover the full # screen if not compressed to a smaller viewport. # This magic allows setting a max height for a cell. @register_line_magic def set_cell_height(size): display( Javascript("google.colab.output.setIframeHeight(0, true, {maxHeight: " + str(size) + "})")) # Check the version of TensorFlow Decision Forests print("Found TensorFlow Decision Forests v" + tfdf.__version__) ``` ## Training a Random Forest model In this section, we train, evaluate, analyse and export a binary classification Random Forest trained on the [Palmer's Penguins](https://allisonhorst.github.io/palmerpenguins/articles/intro.html) dataset. <center> <img src="https://allisonhorst.github.io/palmerpenguins/man/figures/palmerpenguins.png" width="150"/></center> **Note:** The dataset was exported to a csv file without pre-processing: `library(palmerpenguins); write.csv(penguins, file="penguins.csv", quote=F, row.names=F)`. ### Load the dataset and convert it in a tf.Dataset This dataset is very small (300 examples) and stored as a .csv-like file. Therefore, use Pandas to load it. **Note:** Pandas is practical as you don't have to type in name of the input features to load them. 
For larger datasets (>1M examples), using the [TensorFlow Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) to read the files may be better suited. Let's assemble the dataset into a csv file (i.e. add the header), and load it: ``` # Download the dataset !wget -q https://storage.googleapis.com/download.tensorflow.org/data/palmer_penguins/penguins.csv -O /tmp/penguins.csv # Load a dataset into a Pandas Dataframe. dataset_df = pd.read_csv("/tmp/penguins.csv") # Display the first 3 examples. dataset_df.head(3) ``` The dataset contains a mix of numerical (e.g. `bill_depth_mm`), categorical (e.g. `island`) and missing features. TF-DF supports all these feature types natively (differently than NN based models), therefore there is no need for preprocessing in the form of one-hot encoding, normalization or extra `is_present` feature. Labels are a bit different: Keras metrics expect integers. The label (`species`) is stored as a string, so let's convert it into an integer. ``` # Encode the categorical label into an integer. # # Details: # This stage is necessary if your classification label is represented as a # string. Note: Keras expected classification labels to be integers. # Name of the label column. label = "species" classes = dataset_df[label].unique().tolist() print(f"Label classes: {classes}") dataset_df[label] = dataset_df[label].map(classes.index) ``` Next split the dataset into training and testing: ``` # Split the dataset into a training and a testing dataset. 
def split_dataset(dataset, test_ratio=0.30): """Splits a panda dataframe in two.""" test_indices = np.random.rand(len(dataset)) < test_ratio return dataset[~test_indices], dataset[test_indices] train_ds_pd, test_ds_pd = split_dataset(dataset_df) print("{} examples in training, {} examples for testing.".format( len(train_ds_pd), len(test_ds_pd))) ``` And finally, convert the pandas dataframe (`pd.Dataframe`) into tensorflow datasets (`tf.data.Dataset`): ``` train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label) test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label) ``` **Notes:** `pd_dataframe_to_tf_dataset` could have converted the label to integer for you. And, if you wanted to create the `tf.data.Dataset` yourself, there is a couple of things to remember: - The learning algorithms work with a one-epoch dataset and without shuffling. - The batch size does not impact the training algorithm, but a small value might slow down reading the dataset. ### Train the model ``` %set_cell_height 300 # Specify the model. model_1 = tfdf.keras.RandomForestModel() # Optionally, add evaluation metrics. model_1.compile( metrics=["accuracy"]) # Train the model. # "sys_pipes" is optional. It enables the display of the training logs. # TODO with sys_pipes(): model_1.fit(x=train_ds) ``` ### Remarks - No input features are specified. Therefore, all the columns will be used as input features except for the label. The feature used by the model are shown in the training logs and in the `model.summary()`. - DFs consume natively numerical, categorical, categorical-set features and missing-values. Numerical features do not need to be normalized. Categorical string values do not need to be encoded in a dictionary. - No training hyper-parameters are specified. Therefore the default hyper-parameters will be used. Default hyper-parameters provide reasonable results in most situations. - Calling `compile` on the model before the `fit` is optional. 
Compile can be used to provide extra evaluation metrics. - Training algorithms do not need validation datasets. If a validation dataset is provided, it will only be used to show metrics. **Note:** A *Categorical-Set* feature is composed of a set of categorical values (while a *Categorical* is only one value). More details and examples are given later. ## Evaluate the model Let's evaluate our model on the test dataset. ``` # TODO # Evaluate the model evaluation = model_1.evaluate(test_ds, return_dict=True) print() for name, value in evaluation.items(): print(f"{name}: {value:.4f}") ``` **Remark:** The test accuracy is close to the Out-of-bag accuracy shown in the training logs. See the **Model Self Evaluation** section below for more evaluation methods. ## Prepare this model for TensorFlow Serving. Export the model to the SavedModel format for later re-use e.g. [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving). ``` # Save the model model_1.save("/tmp/my_saved_model") ``` ## Plot the model Plotting a decision tree and following the first branches helps learning about decision forests. In some cases, plotting a model can even be used for debugging. Because of the difference in the way they are trained, some models are more interesting to plot than others. Because of the noise injected during training and the depth of the trees, plotting Random Forest is less informative than plotting a CART or the first tree of a Gradient Boosted Tree. Nevertheless, let's plot the first tree of our Random Forest model: ``` # Plot the first tree of the model tfdf.model_plotter.plot_model_in_colab(model_1, tree_idx=0, max_depth=3) ``` The root node on the left contains the first condition (`bill_depth_mm >= 16.55`), number of examples (240) and label distribution (the red-blue-green bar). Examples that evaluate true to `bill_depth_mm >= 16.55` are branched to the green path. The other ones are branched to the red path.
The deeper the node, the more `pure` they become i.e. the label distribution is biased toward a subset of classes. **Note:** Hover the mouse over the plot for details. ## Model structure and feature importance The overall structure of the model is shown with `.summary()`. You will see: - **Type**: The learning algorithm used to train the model (`Random Forest` in our case). - **Task**: The problem solved by the model (`Classification` in our case). - **Input Features**: The input features of the model. - **Variable Importance**: Different measures of the importance of each feature for the model. - **Out-of-bag evaluation**: The out-of-bag evaluation of the model. This is a cheap and efficient alternative to cross-validation. - **Number of {trees, nodes} and other metrics**: Statistics about the structure of the decision forests. **Remark:** The summary's content depends on the learning algorithm (e.g. Out-of-bag is only available for Random Forest) and the hyper-parameters (e.g. the *mean-decrease-in-accuracy* variable importance can be disabled in the hyper-parameters). ``` # Print the overall structure of the model %set_cell_height 300 model_1.summary() ``` The information in ``summary`` is all available programmatically using the model inspector: ``` # The input features model_1.make_inspector().features() # The feature importances model_1.make_inspector().variable_importances() ``` The content of the summary and the inspector depends on the learning algorithm (`tfdf.keras.RandomForestModel` in this case) and its hyper-parameters (e.g. `compute_oob_variable_importances=True` will trigger the computation of Out-of-bag variable importances for the Random Forest learner). ## Model Self Evaluation During training TFDF models can self evaluate even if no validation dataset is provided to the `fit()` method. The exact logic depends on the model.
For example, Random Forest will use Out-of-bag evaluation while Gradient Boosted Trees will use internal train-validation. **Note:** While this evaluation is computed during training, it is NOT computed on the training dataset and can be used as a low quality evaluation. The model self evaluation is available with the inspector's `evaluation()`: ``` # TODO # Evaluate the model model_1.make_inspector().evaluation() ``` ## Plotting the training logs The training logs show the quality of the model (e.g. accuracy evaluated on the out-of-bag or validation dataset) according to the number of trees in the model. These logs are helpful to study the balance between model size and model quality. The logs are available in multiple ways: 1. Displayed in during training if `fit()` is wrapped in `with sys_pipes():` (see example above). 1. At the end of the model summary i.e. `model.summary()` (see example above). 1. Programmatically, using the model inspector i.e. `model.make_inspector().training_logs()`. 1. Using [TensorBoard](https://www.tensorflow.org/tensorboard) Let's try the options 2 and 3: ``` %set_cell_height 150 model_1.make_inspector().training_logs() ``` Let's plot it: ``` # Import necessary libraries import matplotlib.pyplot as plt logs = model_1.make_inspector().training_logs() # Plot the logs plt.figure(figsize=(12, 4)) plt.subplot(1, 2, 1) plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs]) plt.xlabel("Number of trees") plt.ylabel("Accuracy (out-of-bag)") plt.subplot(1, 2, 2) plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs]) plt.xlabel("Number of trees") plt.ylabel("Logloss (out-of-bag)") plt.show() ``` This dataset is small. You can see the model converging almost immediately. Let's use TensorBoard: ``` # This cell start TensorBoard that can be slow. 
# Load the TensorBoard notebook extension %load_ext tensorboard # Google internal version # %load_ext google3.learning.brain.tensorboard.notebook.extension # Clear existing results (if any) !rm -fr "/tmp/tensorboard_logs" # Export the meta-data to tensorboard. model_1.make_inspector().export_to_tensorboard("/tmp/tensorboard_logs") # docs_infra: no_execute # Start a tensorboard instance. %tensorboard --logdir "/tmp/tensorboard_logs" ``` <!-- <img class="tfo-display-only-on-site" src="images/beginner_tensorboard.png"/> --> ## Re-train the model with a different learning algorithm The learning algorithm is defined by the model class. For example, `tfdf.keras.RandomForestModel()` trains a Random Forest, while `tfdf.keras.GradientBoostedTreesModel()` trains a Gradient Boosted Decision Trees. The learning algorithms are listed by calling `tfdf.keras.get_all_models()` or in the [learner list](https://github.com/google/yggdrasil-decision-forests/manual/learners). ``` # List all algorithms tfdf.keras.get_all_models() ``` The description of the learning algorithms and their hyper-parameters are also available in the [API reference](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf) and builtin help: ``` # help works anywhere. help(tfdf.keras.RandomForestModel) # ? only works in ipython or notebooks, it usually opens on a separate panel. tfdf.keras.RandomForestModel? ``` ## Using a subset of features The previous example did not specify the features, so all the columns were used as input feature (except for the label). The following example shows how to specify input features. ``` feature_1 = tfdf.keras.FeatureUsage(name="bill_length_mm") feature_2 = tfdf.keras.FeatureUsage(name="island") all_features = [feature_1, feature_2] # Note: This model is only trained with two features. It will not be as good as # the one trained on all features. 
# TODO model_2 = tfdf.keras.GradientBoostedTreesModel( features=all_features, exclude_non_specified_features=True) model_2.compile(metrics=["accuracy"]) model_2.fit(x=train_ds, validation_data=test_ds) print(model_2.evaluate(test_ds, return_dict=True)) ``` **Note:** As expected, the accuracy is lower than previously. **TF-DF** attaches a **semantics** to each feature. This semantics controls how the feature is used by the model. The following semantics are currently supported: - **Numerical**: Generally for quantities or counts with full ordering. For example, the age of a person, or the number of items in a bag. Can be a float or an integer. Missing values are represented with float(Nan) or with an empty sparse tensor. - **Categorical**: Generally for a type/class in finite set of possible values without ordering. For example, the color RED in the set {RED, BLUE, GREEN}. Can be a string or an integer. Missing values are represented as "" (empty sting), value -2 or with an empty sparse tensor. - **Categorical-Set**: A set of categorical values. Great to represent tokenized text. Can be a string or an integer in a sparse tensor or a ragged tensor (recommended). The order/index of each item doesn't matter. If not specified, the semantics is inferred from the representation type and shown in the training logs: - int, float (dense or sparse) → Numerical semantics. - str (dense or sparse) → Categorical semantics - int, str (ragged) → Categorical-Set semantics In some cases, the inferred semantics is incorrect. For example: An Enum stored as an integer is semantically categorical, but it will be detected as numerical. In this case, you should specify the semantic argument in the input. The `education_num` field of the Adult dataset is classical example. This dataset doesn't contain such a feature. 
However, for the demonstration, we will make the model treat the `year` as a categorical feature: ``` # Define the features %set_cell_height 300 feature_1 = tfdf.keras.FeatureUsage(name="year", semantic=tfdf.keras.FeatureSemantic.CATEGORICAL) feature_2 = tfdf.keras.FeatureUsage(name="bill_length_mm") feature_3 = tfdf.keras.FeatureUsage(name="sex") all_features = [feature_1, feature_2, feature_3] model_3 = tfdf.keras.GradientBoostedTreesModel(features=all_features, exclude_non_specified_features=True) model_3.compile( metrics=["accuracy"]) with sys_pipes(): model_3.fit(x=train_ds, validation_data=test_ds) ``` Note that `year` is in the list of CATEGORICAL features (unlike the first run). ## Hyper-parameters **Hyper-parameters** are parameters of the training algorithm that impact the quality of the final model. They are specified in the model class constructor. The list of hyper-parameters is visible with the *question mark* colab command (e.g. `?tfdf.keras.GradientBoostedTreesModel`). Alternatively, you can find them on the [TensorFlow Decision Forest Github](https://github.com/tensorflow/decision-forests/keras/wrappers_pre_generated.py) or the [Yggdrasil Decision Forest documentation](https://github.com/google/yggdrasil_decision_forests/documentation/learners). The default hyper-parameters of each algorithm match approximately the initial publication paper. To ensure consistency, new features and their matching hyper-parameters are always disabled by default. That's why it is a good idea to tune your hyper-parameters. ``` # A classical but slightly more complex model. model_6 = tfdf.keras.GradientBoostedTreesModel( num_trees=500, growing_strategy="BEST_FIRST_GLOBAL", max_depth=8) model_6.fit(x=train_ds) # TODO # A more complex, but possibly, more accurate model.
model_7 = tfdf.keras.GradientBoostedTreesModel( num_trees=500, growing_strategy="BEST_FIRST_GLOBAL", max_depth=8, split_axis="SPARSE_OBLIQUE", categorical_algorithm="RANDOM", ) model_7.fit(x=train_ds) ``` As new training methods are published and implemented, combinations of hyper-parameters can emerge as good or almost-always-better than the default parameters. To avoid changing the default hyper-parameter values these good combinations are indexed and available as hyper-parameter templates. For example, the `benchmark_rank1` template is the best combination on our internal benchmarks. Those templates are versioned to allow training configuration stability e.g. `benchmark_rank1@v1`. ``` # A good template of hyper-parameters. model_8 = tfdf.keras.GradientBoostedTreesModel(hyperparameter_template="benchmark_rank1") model_8.fit(x=train_ds) ``` The available templates are listed by `predefined_hyperparameters`. Note that different learning algorithms have different templates, even if the name is similar. ``` # The hyper-parameter templates of the Gradient Boosted Tree model. print(tfdf.keras.GradientBoostedTreesModel.predefined_hyperparameters()) ``` ## Feature Preprocessing Pre-processing features is sometimes necessary to consume signals with complex structures, to regularize the model or to apply transfer learning. Pre-processing can be done in one of three ways: 1. Preprocessing on the Pandas dataframe. This solution is easy to implement and generally suitable for experimentation. However, the pre-processing logic will not be exported in the model by `model.save()`. 2. [Keras Preprocessing](https://keras.io/guides/preprocessing_layers/): While more complex than the previous solution, Keras Preprocessing is packaged in the model. 3. [TensorFlow Feature Columns](https://www.tensorflow.org/tutorials/structured_data/feature_columns): This API is part of the TF Estimator library (!= Keras) and planned for deprecation.
This solution is interesting when using existing preprocessing code. Note: Using [TensorFlow Hub](https://www.tensorflow.org/hub) pre-trained embedding is often, a great way to consume text and image with TF-DF. For example, `hub.KerasLayer("https://tfhub.dev/google/nnlm-en-dim128/2")`. See the [Intermediate tutorial](intermediate_colab.ipynb) for more details. In the next example, pre-process the `body_mass_g` feature into `body_mass_kg = body_mass_g / 1000`. The `bill_length_mm` is consumed without pre-processing. Note that such monotonic transformations have generally no impact on decision forest models. ``` %set_cell_height 300 body_mass_g = tf.keras.layers.Input(shape=(1,), name="body_mass_g") body_mass_kg = body_mass_g / 1000.0 bill_length_mm = tf.keras.layers.Input(shape=(1,), name="bill_length_mm") raw_inputs = {"body_mass_g": body_mass_g, "bill_length_mm": bill_length_mm} processed_inputs = {"body_mass_kg": body_mass_kg, "bill_length_mm": bill_length_mm} # "preprocessor" contains the preprocessing logic. preprocessor = tf.keras.Model(inputs=raw_inputs, outputs=processed_inputs) # "model_4" contains both the pre-processing logic and the decision forest. model_4 = tfdf.keras.RandomForestModel(preprocessing=preprocessor) model_4.fit(x=train_ds) model_4.summary() ``` The following example re-implements the same logic using TensorFlow Feature Columns. ``` def g_to_kg(x): return x / 1000 feature_columns = [ tf.feature_column.numeric_column("body_mass_g", normalizer_fn=g_to_kg), tf.feature_column.numeric_column("bill_length_mm"), ] preprocessing = tf.keras.layers.DenseFeatures(feature_columns) model_5 = tfdf.keras.RandomForestModel(preprocessing=preprocessing) model_5.compile(metrics=["accuracy"]) model_5.fit(x=train_ds) ``` ## Training a regression model The previous example trains a classification model (TF-DF does not differentiate between binary classification and multi-class classification). 
In the next example, train a regression model on the [Abalone dataset](https://archive.ics.uci.edu/ml/datasets/abalone). The objective of this dataset is to predict the number of shell's rings of an abalone. **Note:** The csv file is assembled by appending UCI's header and data files. No preprocessing was applied. <center> <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/33/LivingAbalone.JPG/800px-LivingAbalone.JPG" width="200"/></center> ``` # Download the dataset. !wget -q https://storage.googleapis.com/download.tensorflow.org/data/abalone_raw.csv -O /tmp/abalone.csv dataset_df = pd.read_csv("/tmp/abalone.csv") print(dataset_df.head(3)) # Split the dataset into a training and testing dataset. train_ds_pd, test_ds_pd = split_dataset(dataset_df) print("{} examples in training, {} examples for testing.".format( len(train_ds_pd), len(test_ds_pd))) # Name of the label column. label = "Rings" train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label, task=tfdf.keras.Task.REGRESSION) test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label, task=tfdf.keras.Task.REGRESSION) %set_cell_height 300 # TODO # Configure the regression model. model_7 = tfdf.keras.RandomForestModel(task = tfdf.keras.Task.REGRESSION) # Optional. model_7.compile(metrics=["mse"]) # Train the model. with sys_pipes(): model_7.fit(x=train_ds) # Evaluate the model on the test dataset. evaluation = model_7.evaluate(test_ds, return_dict=True) print(evaluation) print() print(f"MSE: {evaluation['mse']}") print(f"RMSE: {math.sqrt(evaluation['mse'])}") ``` ## Training a ranking model Finally, after having trained classification and regression models, train a [ranking](https://en.wikipedia.org/wiki/Learning_to_rank) model. The goal of a ranking is to **order** items by importance. The "value" of relevance does not matter directly.
Ranking a set of *documents* with regard to a user *query* is an example of ranking problem: It is only important to get the right order, where the top documents matter more. TF-DF expects for ranking datasets to be presented in a "flat" format. A document+query dataset might look like that: query | document_id | feature_1 | feature_2 | relevance/label ----- | ----------- | --------- | --------- | --------------- cat | 1 | 0.1 | blue | 4 cat | 2 | 0.5 | green | 1 cat | 3 | 0.2 | red | 2 dog | 4 | NA | red | 0 dog | 5 | 0.2 | red | 1 dog | 6 | 0.6 | green | 1 The *relevance/label* is a floating point numerical value between 0 and 5 (generally between 0 and 4) where 0 means "completely unrelated", 4 means "very relevant" and 5 means "the same as the query". Interestingly, decision forests are often good rankers, and many state-of-the-art ranking models are decision forests. In this example, use a sample of the [LETOR3](https://www.microsoft.com/en-us/research/project/letor-learning-rank-information-retrieval/#!letor-3-0) dataset. More precisely, we want to download the `OHSUMED.zip` from [the LETOR3 repo](https://onedrive.live.com/?authkey=%21ACnoZZSZVfHPJd0&id=8FEADC23D838BDA8%21107&cid=8FEADC23D838BDA8). This dataset is stored in the libsvm format, so we will need to convert it to csv. ``` %set_cell_height 200 archive_path = tf.keras.utils.get_file("letor.zip", "https://download.microsoft.com/download/E/7/E/E7EABEF1-4C7B-4E31-ACE5-73927950ED5E/Letor.zip", extract=True) # Path to the train and test dataset using libsvm format. raw_dataset_path = os.path.join(os.path.dirname(archive_path),"OHSUMED/Data/All/OHSUMED.txt") ``` The dataset is stored as a .txt file in a specific format, so first convert it into a csv file. ``` def convert_libsvm_to_csv(src_path, dst_path): """Converts a libsvm ranking dataset into a flat csv file. Note: This code is specific to the LETOR3 dataset. 
""" dst_handle = open(dst_path, "w") first_line = True for src_line in open(src_path,"r"): # Note: The last 3 items are comments. items = src_line.split(" ")[:-3] relevance = items[0] group = items[1].split(":")[1] features = [ item.split(":") for item in items[2:]] if first_line: # Csv header dst_handle.write("relevance,group," + ",".join(["f_" + feature[0] for feature in features]) + "\n") first_line = False dst_handle.write(relevance + ",g_" + group + "," + (",".join([feature[1] for feature in features])) + "\n") dst_handle.close() # Convert the dataset. csv_dataset_path="/tmp/ohsumed.csv" convert_libsvm_to_csv(raw_dataset_path, csv_dataset_path) # Load a dataset into a Pandas Dataframe. dataset_df = pd.read_csv(csv_dataset_path) # Display the first 3 examples. dataset_df.head(3) train_ds_pd, test_ds_pd = split_dataset(dataset_df) print("{} examples in training, {} examples for testing.".format( len(train_ds_pd), len(test_ds_pd))) # Display the first 3 examples of the training dataset. train_ds_pd.head(3) ``` In this dataset, the `relevance` defines the ground-truth rank among rows of the same `group`. ``` # Name of the relevance and grouping columns. relevance = "relevance" ranking_train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=relevance, task=tfdf.keras.Task.RANKING) ranking_test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=relevance, task=tfdf.keras.Task.RANKING) %set_cell_height 400 # TODO # Define the ranking model model_8 = tfdf.keras.GradientBoostedTreesModel( task=tfdf.keras.Task.RANKING, ranking_group="group", num_trees=50) with sys_pipes(): model_8.fit(x=ranking_train_ds) ``` At this point, keras does not propose any ranking metrics. Instead, the training and validation (a GBDT uses a validation dataset) are shown in the training logs. In this case the loss is `LAMBDA_MART_NDCG5`, and the final (i.e. 
at the end of the training) NDCG (normalized discounted cumulative gain) is `0.510136` (see line `Final model valid-loss: -0.510136`). Note that the NDCG is a value between 0 and 1. The larger the NDCG, the better the model. For this reason, the loss is set to -NDCG. As before, the model can be analysed: ``` # Print the summary of the model %set_cell_height 400 model_8.summary() ```
github_jupyter
``` %run ./resources/library.py style_notebook() ``` # Notebook 3: Exploring TB and Socio-economic Indicators, Part 2 ## Review Goals Our goal for this TB exploration notebook is to construct a "gapminder" for TB data and a time series choropleth map. See figures below. ![Gapminder for TB](images/gapminder-for-tb.png) **Figure 1**. Interactive Gapminder bubble chart for TB data ![Choropleth Timeseries for TB](images/choropleth-timeseries-tb.png) **Figure 2**. Interactive choropleth map for TB data ## Steps in this notebook Notebook 3 3. Merge, clean, explore dataframes 4. Generate plotly express visualizations ## STEP 1 (Review). Set up data exploration environment To create visualizations, we will use Plotly Express. You can learn more about Plotly Express from: 1. [Plotly GitHub Site](https://github.com/plotly/plotly_express) (GitHub) 2. [Jupyter Notebook Example](https://www.plotly.express/) 3. Some articles written about it [here](https://medium.com/@plotlygraphs/introducing-plotly-express-808df010143d) and [here](https://towardsdatascience.com/plotly-express-the-good-the-bad-and-the-ugly-dc941649687c) ``` import plotly.express as px import plotly plotly.__version__ ``` We will also use our Swiss Army knife for data science, `pandas`. ``` import pandas as pd pd.__version__ pd.set_option('max_colwidth', 150) pd.set_option('display.max_columns', 50) pd.set_option('display.max_rows', 100) ``` **`missingno`**: Missing data visualization module for Python Read more about it here: https://github.com/ResidentMario/missingno ``` import missingno as mno mno.__version__ %matplotlib inline import numpy as np np.__version__ ``` ## STEP 2. Download and clean up data Since we have created all the necessary dataframes in Notebook 2 and pickled these, we will just load the pickle files here. All data pickle files are found in the `outputs` folder. 
Load TB data: ``` tb_df2 = pd.read_pickle('outputs/tb_df2.pickle') tb_dict_df = pd.read_pickle('outputs/tb_dict_df.pickle') ``` Load World Bank GDP, GDP per capita data, and country metadata: ``` wb_gdp_df5 = pd.read_pickle('outputs/wb_gdp_df5.pickle') wb_gdppc_df5 = pd.read_pickle('outputs/wb_gdppc_df5.pickle') wb_gdp_meta_df3 = pd.read_pickle('outputs/wb_gdp_meta_df3.pickle') ``` Load World Bank education data (literacy rate, primary completion rate) and indicator metadata: ``` wb_ed_df2 = pd.read_pickle('outputs/wb_ed_df2.pickle') wb_edmeta_df1 = pd.read_pickle('outputs/wb_edmeta_df1.pickle') wb_ed_primcomp_df4 = pd.read_pickle('outputs/wb_ed_primcomp_df4.pickle') ``` ## STEP 3. Clean, merge, explore data sets ### GDP data #### Merge #1: Merge WB GDP data and metadata dataframes on `ISO_Alpha` ``` merged_df1 = \ pd.merge(wb_gdp_meta_df3, wb_gdp_df5, on=['ISO_Alpha'], how='inner').\ drop(columns=['Country_y']) merged_df1.head() merged_df2 = merged_df1.rename(columns={'Country_x': 'Country'}) merged_df2.head() ``` #### Merge #2: Merge WB GDP and GDP per capita data ``` merged_df3 = \ pd.merge(wb_gdppc_df5, merged_df2, on=['ISO_Alpha','Year'], how='inner').\ drop(columns=['Country_y']) merged_df3 merged_df4 = merged_df3.rename(columns={'Country_x': 'Country'}) merged_df4 mno.bar(merged_df4) tb_nonmatch = np.setdiff1d(tb_df2.ISO_Alpha.unique(), merged_df4.ISO_Alpha.unique()) tb_nonmatch ``` It seems we have a few countries that did not match from the two dataframes. Let's take a closer look, and view the country names. ``` for iso3 in tb_nonmatch: print(iso3, tb_df2.query("ISO_Alpha=='"+iso3+"'")['Country'].unique()) ``` Do you know where these countries are? 
#### Merge #3: Merge WB GDP, GDP per capita and primary completion rate ``` merged_df5 = \ pd.merge(wb_ed_primcomp_df4, merged_df4, on=['ISO_Alpha','Year'], how='inner').\ drop(columns=['Country_y']) merged_df5 merged_df6 = \ merged_df5.rename(\ columns={'Country_x': 'Country','Value':'Primary-Completion-Rate'}) merged_df6 ``` #### Merge #4: Merge TB data with WB GDP data ``` merged_df7 = pd.merge(merged_df6, tb_df2, on=['ISO_Alpha', 'Year'], how='inner') merged_df8 = merged_df7.rename(columns={'Country_x': 'Country'}) merged_df8.info() mno.bar(merged_df8) mno.heatmap(merged_df8) ``` ### 2016 subset of second-merge dataframe ``` is_2016 = merged_df8['Year'] == 2016 merged_df8_2016 = merged_df8[is_2016] merged_df8_2016.info() ``` #### Pickle merged files ``` merged_df8_2016.to_pickle('outputs/merged_df8_2016.pickle') merged_df4.to_pickle('outputs/merged_df4.pickle') merged_df8.to_pickle('outputs/merged_df8.pickle') ``` ## You finished Notebook 3! Please proceed to Notebook 4 to see the visualizations.
github_jupyter
# Demagnetisation using periodic boundary conditions ## Setting the simulation ``` import dolfin as df import numpy as np import matplotlib.pyplot as plt from finmag import Simulation as Sim from finmag.energies import Demag from finmag import MacroGeometry %matplotlib inline ``` The mesh unit cell is a box with edge length $a$ and number of vertices along one dimension $N_{v}$. ``` a = 5 # edge length (nm) Nv = 5 # number of vertices along one direction mesh = df.BoxMesh(-a/2., -a/2., -a/2., a/2., a/2., a/2., Nv, Nv, Nv) ``` The simulation object is created with saturation magnetisation $M_\text{s} = 10^6$ A/m. ``` Ms = 1e6 # saturation magnetisation (A/m) sim = Sim(mesh, Ms, unit_length=1e-9) ``` The Demag object is created using the already defined lattice and added to the simulation: ``` n = 5 demag = Demag(macrogeometry=MacroGeometry(nx=n)) sim.add(demag) ``` Now, the demagnetisation field can be computed for different magnetisation configurations. For instance: ``` sim.set_m((0, 0, 1)) field = sim.llg.effective_field.get_dolfin_function('Demag') ``` At a particular point in the mesh, the field value can be extracted: ``` field_at_point = field(0, 0, 0) print field_at_point ``` ## Demagnetisation field for different number of elements in the lattice An array of possible numbers of elements in the lattice: ``` ns = np.arange(1, 30, 2) print ns ``` The first part of this notebook is implemented as a function which returns the demagnetisation field for two different magnetisation configurations (0, 0, 1) and (1, 0, 0): ``` a = 10 # edge length (nm) Nv = 10 # number of vertices along one direction mesh = df.BoxMesh(-a/2., -a/2., -a/2., a/2., a/2., a/2., Nv, Nv, Nv) Ms = 1e6 # saturation magnetisation (A/m) sim = Sim(mesh, Ms, unit_length=1e-9) def compute_fields(n): demag = Demag(macrogeometry=MacroGeometry(nx=n)) sim.add(demag) sim.set_m((1, 0, 0)) field1 = sim.llg.effective_field.get_dolfin_function('Demag') sim.set_m((0, 0, 1)) field2 =
sim.llg.effective_field.get_dolfin_function('Demag') sim.remove_interaction('Demag') return field1(0, 0, 0)/Ms, field2(0, 0, 0)/Ms ``` Now, the field is computed for different values of $n$ and plotted: ``` field1_list = [] field2_list = [] for i in ns: fields = compute_fields(i) field1_list.append(fields[0][0]) field2_list.append(fields[1][2]) plt.figure(figsize=(10, 7)) plt.subplot(211) plt.plot(ns, field1_list) plt.xlabel('n') plt.ylabel('Hx') plt.grid() plt.subplot(212) plt.plot(ns, field2_list) plt.xlabel('n') plt.ylabel('Hz') plt.grid() ```
github_jupyter
# Introduction This notebook does not run in **binder**. The OOI data system features an extension called Data Explorer. This sub-system is intended to facilitate data exploration and use. For more detail see notebook **Ocean 01 B**. This notebook cleans up Level 1+ NetCDF files obtained from Data Explorer. The results are cleanly sampled at one-minute ("1Min") sample spacing. ``` import os, sys, time, glob, warnings from IPython.display import clear_output # use inside loop with clear_output(wait = True) followed by print(i) warnings.filterwarnings('ignore') this_dir = os.getcwd() data_dir = this_dir + '/../data' from matplotlib import pyplot as plt from matplotlib import colors as mplcolors import numpy as np, pandas as pd, xarray as xr from numpy import datetime64 as dt64, timedelta64 as td64 # convenience functions abbreviating 'datetime64' and so on def doy(theDatetime): return 1 + int((theDatetime - dt64(str(theDatetime)[0:4] + '-01-01')) / td64(1, 'D')) def dt64_from_doy(year, doy): return dt64(str(year) + '-01-01') + td64(doy-1, 'D') def day_of_month_to_string(d): return str(d) if d > 9 else '0' + str(d) print('\nJupyter Notebook running Python {}'.format(sys.version_info[0])) ############################################################ # shallow profiler dive timestamp generator ############################################################ # Datasets extend over the full program from 2014 to download date Aug 2021. # At first look they appear sampled at 1/minute but there are anomalies; # so to have more confidence this code resamples them at "1Min". 
def StandardizeNetCDFDataset(source_location, datafile): ds = xr.open_dataset(source_location + datafile) ds = ds.set_coords("time") ds = ds.swap_dims({"row":"time"}) df = ds.to_dataframe().resample("1Min").mean() vals = [xr.DataArray(data=df[c], dims=['time'], coords={'time':df.index}, attrs=ds[c].attrs) for c in df.columns] return xr.Dataset(dict(zip(df.columns, vals)), attrs=ds.attrs) data_root = os.getenv("HOME") + '/data/data_ooi_data_explorer/' out_root = os.getenv("HOME") + '/data/data_explorer_1Min/' sitekeys = ['/axb/', '/oos/', '/osb/'] framekeys = ['/profiler/', '/platform/'] n_sites = len(sitekeys) n_frames = len(framekeys) for i in range(n_sites): for j in range(n_frames): this_data_path = data_root + sitekeys[i] + framekeys[j] this_out_path = out_root + sitekeys[i] + framekeys[j] possible_datafiles = os.listdir(this_data_path) datafiles = [] for poss in possible_datafiles: if poss.split('.')[1] == 'nc': datafiles.append(poss) for datafile in datafiles: ds = StandardizeNetCDFDataset(this_data_path, datafile) outfile = this_out_path + datafile.split('.')[0] + '_1Min.nc' ds.to_netcdf(outfile) print(datafile, '>', outfile, 'with size', ds.time.shape[0]) ```
github_jupyter
# BERT **Bidirectional Encoder Representations from Transformers.** _ | _ - | - ![alt](https://pytorch.org/assets/images/bert1.png) | ![alt](https://pytorch.org/assets/images/bert2.png) ### **Overview** BERT was released together with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin *et al.* The model is based on the Transformer architecture introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani *et al.* and has led to significant improvements in a wide range of natural language tasks. At the highest level, BERT maps from a block of text to a numeric vector which summarizes the relevant information in the text. What is remarkable is that numeric summary is sufficiently informative that, for example, the numeric summary of a paragraph followed by a reading comprehension question contains all the information necessary to satisfactorily answer the question. #### **Transfer Learning** BERT is a great example of a paradigm called *transfer learning*, which has proved very effective in recent years. In the first step, a network is trained on an unsupervised task using massive amounts of data. In the case of BERT, it was trained to predict missing words and to detect when pairs of sentences are presented in reversed order using all of Wikipedia. This was initially done by Google, using intense computational resources. Once this network has been trained, it is then used to perform many other supervised tasks using only limited data and computational resources: for example, sentiment classification in tweets or quesiton answering. The network is re-trained to perform these other tasks in such a way that only the final, output parts of the network are allowed to adjust by very much, so that most of the "information'' originally learned the network is preserved. This process is called *fine tuning*. 
##Getting to know BERT BERT, and many of its variants, are made avialable to the public by the open source [Huggingface Transformers](https://huggingface.co/transformers/) project. This is an amazing resource, giving researchers and practitioners easy-to-use access to this technology. In order to use BERT for modeling, we simply need to download the pre-trained neural network and fine tune it on our dataset, which is illustrated below. ``` !pip install transformers import tensorflow as tf import numpy as np import pandas as pd from transformers import TFBertModel, BertTokenizer # Formatting tools from pprint import pformat np.set_printoptions(threshold=10) # Download text pre-processor ("tokenizer") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # Download BERT model bert = TFBertModel.from_pretrained("bert-base-uncased") ``` ### Tokenization The first step in using BERT (or any similar text embedding tool) is to *tokenize* the data. This step standardizes blocks of text, so that meaningless differences in text presentation don't affect the behavior of our algorithm. Typically the text is transformed into a sequence of 'tokens,' each of which corresponds to a numeric code. ``` # Let's try it out! s = "What happens to this string?" print('Original String: \n\"{}\"\n'.format(s)) tensors = tokenizer(s) print('Numeric encoding: \n' + pformat(tensors)) # What does this mean? print('\nActual tokens:') tokenizer.convert_ids_to_tokens(tensors['input_ids']) ``` ### BERT in a nutshell Once we have our numeric tokens, we can simply plug them into the BERT network and get a numeric vector summary. Note that in applications, the BERT summary will be "fine tuned" to a particular task, which hasn't happened yet. 
``` print('Input: "What happens to this string?"\n') # Tokenize the string tensors_tf = tokenizer("What happens to this string?", return_tensors="tf") # Run it through BERT output = bert(tensors_tf) # Inspect the output _shape = output['pooler_output'].shape print( """Output type: {}\n Output shape: {}\n Output preview: {}\n""" .format( type(output['pooler_output']), _shape, pformat(output['pooler_output'].numpy()))) ``` # A practical introduction to BERT In the next part of the notebook, we are going to explore how a tool like BERT may be useful to an econometrician. In particular, we are going to apply BERT to a subset of data from the Amazon marketplace consisting of roughly 10,000 listings for products in the toy category. Each product comes with a text description, a price, and a number of times reviewed (which we'll use as a proxy for demand / market share). **Problem 1**: What are some issues you may anticipate when using number of reviews as a proxy for demand or market share? ### Getting to know the data First, we'll download and clean up the data, and do some preliminary inspection. 
``` # Download data DATA_URL = 'https://www.dropbox.com/s/on2nzeqcdgmt627/amazon_co-ecommerce_sample.csv?dl=1' data = pd.read_csv(DATA_URL) # Clean numeric data fields data['number_of_reviews'] = pd.to_numeric(data .number_of_reviews .str.replace(r"\D+",'')) data['price'] = (data .price .str.extract(r'(\d+\.*\d+)') .astype('float')) # Drop products with very few reviews data = data[data['number_of_reviews'] > 0] # Compute log prices data['ln_p'] = np.log(data.price) # Impute market shares data['ln_q'] = np.log(data['number_of_reviews'] / data['number_of_reviews'].sum()) # Collect relevant text data data[['text']] = (data[[ 'product_name', 'product_description']] .astype('str') .agg(' | '.join, axis=1)) # Drop irrelevant data and inspect data = data[['text','ln_p','ln_q']] data = data.dropna() data.head() ``` Let's make a two-way scatter plot of prices and (proxied) market shares. ``` # Plot log price against market share data.plot.scatter('ln_p','ln_q') ``` Let's begin with a simple prediction task. We will discover how well can we explain the price of these products using their textual descriptions. **Problem 2**: 1. Build a linear model that explains the price of each product using it's text embedding vector as the explanatory variables. 2. Build a two-layer perceptron neural network that explains the price of each product using the text embedding vector as input (see example code below). <!-- 3. Now, instead of taking the text embeddings as fixed, we allow the it to ``fine tune.'' Construct a neural network by combining the (pre-loaded) BERT network --> 3. Report the $R^2$ of both approaches. 4. As an econometrician, what are some concerns you may have about how to interpret these models? 
``` ## First, let's split and preprocess (tokenize) the text to prepare it for BERT main = data.sample(frac=0.6,random_state=200) holdout = data.drop(main.index) tensors = tokenizer( list(main["text"]), padding=True, truncation=True, max_length=128, return_tensors="tf") ln_p = main["ln_p"] ln_q = main["ln_q"] ## Now let's prepare our model from tensorflow.keras import Model, Input from tensorflow.keras.layers import Dense, Dropout, Concatenate input_ids = Input(shape=(128,), dtype=tf.int32) token_type_ids = Input(shape=(128,), dtype=tf.int32) attention_mask = Input(shape=(128,), dtype=tf.int32) # First we compute the text embedding Z = bert(input_ids, token_type_ids, attention_mask) # We want the "pooled / summary" embedding, not individual word embeddings Z = Z[1] # Then we do a regular regression Z = Dense(128, activation='relu')(Z) Z = Dropout(0.2)(Z) Z = Dense(32, activation='relu')(Z) Z = Dropout(0.2)(Z) Z = Dense(8, activation='relu')(Z) ln_p_hat = Dense(1, activation='linear')(Z) PricePredictionNetwork = Model([input_ids, token_type_ids, attention_mask], ln_p_hat) PricePredictionNetwork.compile(optimizer='adam', loss='mse') PricePredictionNetwork.summary() PricePredictionNetwork.fit( [tensors['input_ids'], tensors['token_type_ids'], tensors['attention_mask']], ln_p, epochs=3, batch_size=16, shuffle=True) ``` Now, let's go one step further and construct a DML estimator of the average price elasticity. In particular, we will model market share $q_i$ as $$\ln q_i = \alpha + \beta \ln p_i + \psi(d_i) + \epsilon_i,$$ where $d_i$ denotes the description of product $i$ and $\psi$ is the composition of text embedding and a two-layer perceptron. **Problem 3**: 1. Split the sample in two, and predict $\ln p_i$ and $\ln q_i$ using $d_i$ with a two-layer perceptron as before, using the main sample. 2. In the holdout sample, perform an OLS regression of the residual of $\ln q_i$ on the residual of $\ln p_i$ (using the previous problem's model). 3. What do you find? 
``` ## Build the quantity prediction network # Initialize new BERT model from original bert2 = TFBertModel.from_pretrained("bert-base-uncased") # Define inputs input_ids = Input(shape=(128,), dtype=tf.int32) token_type_ids = Input(shape=(128,), dtype=tf.int32) attention_mask = Input(shape=(128,), dtype=tf.int32) # First we compute the text embedding Z = bert2(input_ids, token_type_ids, attention_mask) # We want the "pooled / summary" embedding, not individual word embeddings Z = Z[1] # Construct network Z = Dense(128, activation='relu')(Z) Z = Dropout(0.2)(Z) Z = Dense(32, activation='relu')(Z) Z = Dropout(0.2)(Z) Z = Dense(8, activation='relu')(Z) ln_q_hat = Dense(1, activation='linear')(Z) # Compile model and optimization routine QuantityPredictionNetwork = Model([input_ids, token_type_ids, attention_mask], ln_q_hat) QuantityPredictionNetwork.compile(optimizer='adam', loss='mse') QuantityPredictionNetwork.summary() ## Fit the quantity prediction network in the main sample QuantityPredictionNetwork.fit( [tensors['input_ids'], tensors['token_type_ids'], tensors['attention_mask']], ln_q, epochs=3, batch_size=16, shuffle=True) ## Predict in the holdout sample, residualize and regress # Preprocess holdout sample tensors_holdout = tokenizer( list(holdout["text"]), padding=True, truncation=True, max_length=128, return_tensors="tf") # Compute predictions ln_p_hat_holdout = PricePredictionNetwork.predict([tensors_holdout['input_ids'], tensors_holdout['token_type_ids'], tensors_holdout['attention_mask']]) ln_q_hat_holdout = QuantityPredictionNetwork.predict([tensors_holdout['input_ids'], tensors_holdout['token_type_ids'], tensors_holdout['attention_mask']]) # Compute residuals r_p = holdout["ln_p"] - ln_p_hat_holdout.reshape((-1,)) r_q = holdout["ln_q"] - ln_q_hat_holdout.reshape((-1,)) # Regress to obtain elasticity estimate beta = np.mean(r_p * r_q) / np.mean(r_p * r_p) # standard error on elastiticy estimate se = np.sqrt(np.mean( (r_p* 
r_q)**2)/(np.mean(r_p*r_p)**2)/holdout["ln_p"].size) print('Elasticity of Demand with Respect to Price: {}'.format(beta)) print('Standard Error: {}'.format(se)) ``` ## Clustering Products In this final part of the notebook, we'll illustrate how the BERT text embeddings can be used to cluster products based on their descriptions. Intiuitively, our neural network has now learned which aspects of the text description are relevant to predict prices and market shares. We can therefore use the embeddings produced by our network to cluster products, and we might expect that the clusters reflect market-relevant information. In the following block of cells, we compute embeddings using our learned models and cluster them using $k$-means clustering with $k=10$. Finally, we will explore how the estimated price elasticity differs across clusters. ### Overview of **$k$-means clustering** The $k$-means clustering algorithm seeks to divide $n$ data vectors into $k$ groups, each of which contain points that are "close together." In particular, let $C_1, \ldots, C_k$ be a partitioning of the data into $k$ disjoint, nonempty subsets (clusters), and define $$\bar{C_i}=\frac{1}{\#C_i}\left(\sum_{x \in C_i} x\right)$$ to be the *centroid* of the cluster $C_i$. The $k$-means clustering score $\mathrm{sc}(C_1 \ldots C_k)$ is defined to be $$\mathrm{sc}(C_1 \ldots C_k) = \sum_{i=1}^k \sum_{x \in C_i} \left(x - \bar{C_i}\right)^2.$$ The $k$-means clustering is then defined to be any partitioning $C^*_1 \ldots C^*_k$ that minimizes the score $\mathrm{sc}(-)$. **Problem 4** Show that the $k$-means clustering depends only on the pairwise distances between points. 
*Hint: verify that $\sum_{x,y \in C_i} (x - \bar{C_i})(y - \bar{C_i}) = 0$.* ``` ## STEP 1: Compute embeddings input_ids = Input(shape=(128,), dtype=tf.int32) token_type_ids = Input(shape=(128,), dtype=tf.int32) attention_mask = Input(shape=(128,), dtype=tf.int32) Y1 = bert(input_ids, token_type_ids, attention_mask)[1] Y2 = bert2(input_ids, token_type_ids, attention_mask)[1] Y = Concatenate()([Y1,Y2]) embedding_model = Model([input_ids, token_type_ids, attention_mask], Y) embeddings = embedding_model.predict([tensors_holdout['input_ids'], tensors_holdout['token_type_ids'], tensors_holdout['attention_mask']]) ``` ### Dimension reduction and the **Johnson-Lindenstrauss transform** Our learned embeddings have dimension in the $1000$s, and $k$-means clustering is often an expensive operation. To improve the situation, we will use a neat trick that is used extensively in machine learning applications: the *Johnson-Lindenstrauss transform*. This trick involves finding a low-dimensional linear projection of the embeddings that approximately preserves pairwise distances. In fact, Johnson and Lindenstrauss proved a much more interesting statement: a Gaussian random matrix will *almost always* approximately preserve pairwise distances. **Problem 5** Suppose we have a low-dimensional projection matrix $\Pi$ that preserves pairwise distances, and let $X$ be the design matrix. Explain how and why we could compute the $k$-means clustering using only the projected data $\Pi X$. 
*Hint: use Problem 4.* ``` # STEP 2 Make low-dimensional projections from sklearn.random_projection import GaussianRandomProjection jl = GaussianRandomProjection(eps=.25) embeddings_lowdim = jl.fit_transform(embeddings) # STEP 3 Compute clusters from sklearn.cluster import KMeans k_means = KMeans(n_clusters=10) k_means.fit(embeddings_lowdim) cluster_ids = k_means.labels_ # STEP 4 Regress within each cluster betas = np.zeros(10) ses = np.zeros(10) for c in range(10): r_p_c = r_p[cluster_ids == c] r_q_c = r_q[cluster_ids == c] # Regress to obtain elasticity estimate betas[c] = np.mean(r_p_c * r_q_c) / np.mean(r_p_c * r_p_c) # standard error on elastiticy estimate ses[c] = np.sqrt(np.mean( (r_p_c* r_q_c)**2)/(np.mean(r_p_c*r_p_c)**2)/r_p_c.size) # STEP 5 Plot from matplotlib import pyplot as plt plt.bar(range(10),betas, yerr = ses) ```
github_jupyter
# Weather Underground Hurricane Data ----- ## Processed Data Research A notebook for researching the processed Weather Underground data from the ```src/process_data.py``` script. ``` processed_data_dir = '../data/processed/' media_dir = '../media' figsize_width = 12 figsize_height = 8 output_dpi = 72 # Imports import os import pickle import numpy as np import pandas as pd from datetime import datetime import matplotlib.pyplot as plt # Load Data with open(os.path.join(processed_data_dir, 'region_data.pkl'), 'rb') as fin: region_df = pickle.load(fin) with open(os.path.join(processed_data_dir, 'region_yearly_data.pkl'), 'rb') as fin: region_yearly_df = pickle.load(fin) with open(os.path.join(processed_data_dir, 'storm_track_data.pkl'), 'rb') as fin: storm_track_dict = pickle.load(fin) # - Variable setup default_fig_size = (figsize_width, figsize_height) # - Plot data by region regions = ['North Atlantic', 'East Pacific', 'Western Pacific', 'Indian Ocean'] stats = ['Storms', 'Hurricanes', 'Deaths', 'Damage'] colors = ['#2d758c', '#cf4917', '#f9ac3d', '#758c33'] color_dict = dict(zip(regions, colors)) fig, axs = plt.subplots(nrows=4, ncols=4, sharex=True, figsize=default_fig_size) i_col = 0 for region in regions: t_reg_df = region_df.loc[:, region] i_row = 0 for statistic in stats: ax = axs[i_row][i_col] clr = color_dict[region] t_reg_df.loc[:, statistic].plot(ax=ax, color=clr) ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_yticklabels([]) if i_col == 0: ax.set_ylabel(statistic) if i_row == 0: ax.set_title(region) i_row += 1 i_col += 1 fig.suptitle('Data by Region', fontweight='bold', va='top') fig.savefig(os.path.join(media_dir, 'region_data_by_region_stat.png'), dpi=output_dpi) plt.show(); # - Get common starting date plt_start = region_df.first_valid_index() for region in set(region_df.columns.get_level_values('Region')): t_df = region_df.loc[:, pd.IndexSlice[region, 'Hurricanes']] t_df[t_df == 0.] 
= np.nan t_start_dt = t_df.first_valid_index() if t_start_dt > plt_start: plt_start = t_start_dt print("Common starting date: {}".format(plt_start)) # - Total occurences over time agg_data = region_df.groupby(level='Statistic', axis=1).sum().loc[plt_start:] pct_hurricanes = agg_data.loc[:, 'Hurricanes'] / agg_data.loc[:, 'Storms'] avg_counts = agg_data.loc[:, ['Hurricanes', 'Storms']].mean().values avg_pct = pct_hurricanes.mean() # - Plot plot_percentages = False fig, ax = plt.subplots(figsize=default_fig_size) agg_data.loc[:, 'Storms'].plot.area(ax=ax, alpha=1, color='#41a8c9', zorder=1) agg_data.loc[:, 'Hurricanes'].plot.area(ax=ax, alpha=1, color='#ec8055', zorder=2) ax.axhline(avg_counts[1], label='Storms Mean ({:.0f})'.format(avg_counts[1]), color='#2d758c', alpha=0.9, linestyle='--', linewidth=2, zorder=3) ax.axhline(avg_counts[0], label='Hurricanes Mean ({:.0f})'.format(avg_counts[0]), color='#cf4917', alpha=0.9, linestyle='--', linewidth=2, zorder=3) ax.set_title('Storms and Hurricanes over Time (All Regions)', fontweight='bold') ax.set_ylabel('# of Occurrences'); ax.set_xlabel('') lines, labels = ax.get_legend_handles_labels() if plot_percentages: ax2 = (pct_hurricanes * 100.).plot(ax=ax, secondary_y=True, zorder=4, linestyle='-', color='#d0b285', linewidth=2.5, label='Percent Hurricanes') ax2.axhline(avg_pct*100., label='Percent Mean ({:.1f}%)'.format(100.*avg_pct), color='#a2783c', alpha=0.9, linestyle='--', linewidth=2, zorder=5) ax2.set_ylim((0, 100)) ax2.set_ylabel('Percent (%)') lines_2, labels_2 = ax2.get_legend_handles_labels() ax.legend(lines[::-1]+lines_2, labels[::-1]+labels_2, loc='upper right') else: ax.legend(lines[::-1], labels[::-1], loc='upper right') fig.savefig(os.path.join(media_dir, 'storms_hurricanes_all_regions.png'), dpi=output_dpi) plt.show(); # - Get avg max winds data start_dates = region_yearly_df.loc[:, ['Start Date']].set_index('Start Date').index cut_region_yearly = region_yearly_df.loc[start_dates.year >= plt_start, :] 
cut_start_dates = cut_region_yearly.loc[:, ['Start Date']].set_index('Start Date').index avg_max_wind_speed = cut_region_yearly.loc[:, 'Max Winds'].groupby(cut_start_dates.year).mean() # - Plot fig, ax = plt.subplots(figsize=default_fig_size) avg_max_wind_speed.plot(ax=ax, color='#2d758c') ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_ylim(0, 100) ax.set_title('Average Max Wind Speed over Time (All Regions)', fontweight='bold') ax.set_ylabel('Wind Speed (mph)') ax.set_xlabel('') fig.savefig(os.path.join(media_dir, 'avg_max_wind_speed_by_year.png'), dpi=output_dpi) plt.show(); # - Avg Max Wind Speed by Region reg_avgmaxwind = cut_region_yearly.groupby(['Region', cut_start_dates.year], axis=0) \ .mean().loc[:, 'Max Winds'].unstack('Region') reg_counts = cut_region_yearly.groupby(['Region', cut_start_dates.year], axis=0).count() \ .loc[:, 'Max Winds'].unstack('Region') # - Plot fig, axs = plt.subplots(nrows=4, ncols=2, sharex=False, figsize=(figsize_width, figsize_width)) i = 0 for reg in reg_avgmaxwind.columns: ax = axs[i][0] reg_avgmaxwind.loc[:, reg].plot(ax=ax, label=reg, color=color_dict[reg]) ax.set_ylim((0, 120)) ax.set_xlim((1960, 2016)) ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_ylabel(reg) ax.set_xlabel('') ax = axs[i][1] reg_counts.loc[:, reg].plot(ax=ax, kind='bar', label='Count', color=color_dict[reg]) ax.xaxis.set_major_locator(plt.MultipleLocator(10)) ax.xaxis.set_ticklabels([plt_start] + list(range(plt_start, 2015, 10)), rotation=0) ax.set_xlabel('') i += 1 axs[0][0].set_title('Avg Max Wind Speed by Year (mph)', fontweight='bold') axs[0][1].set_title('Data Count by Year', fontweight='bold') axs[-1][0].set_xlabel('') axs[-1][1].set_xlabel('') fig.savefig(os.path.join(media_dir, 'avg_max_winds_by_region.png'), dpi=output_dpi) plt.show(); # - North Atlantic Specific Focus allyr_avgmaxwind = region_yearly_df.groupby(['Region', start_dates.year], axis=0) \ .mean().loc[:, 'Max Winds'].unstack('Region') allyr_counts = 
region_yearly_df.groupby(['Region', start_dates.year], axis=0) \ .count().loc[:, 'Max Winds'].unstack('Region') fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(figsize_width, figsize_height/2)) ax = axs[0] allyr_avgmaxwind.loc[:, 'North Atlantic'].plot(ax=ax, color=color_dict['North Atlantic']) ax.set_ylim(0, 120) ax.grid(True, color='grey', alpha=0.6, linestyle='--') ax.set_title('') ax.set_ylabel('Average Max Wind Speed (mph)') ax.set_xlabel('') ax = axs[1] allyr_counts.loc[:, 'North Atlantic'].plot(ax=ax, kind='bar', label='Count', color=color_dict['North Atlantic']) disp_mult = 20 ax.xaxis.set_major_locator(plt.MultipleLocator(disp_mult)) ax.xaxis.set_ticklabels([allyr_counts.index[0]] + list(range(allyr_counts.index[0], 2015, disp_mult)), rotation=0) ax.set_title('') ax.set_ylabel('# of Data Points') ax.set_xlabel('') fig.suptitle('North Atlantic (All Data)', fontweight='bold') fig.savefig(os.path.join(media_dir, 'north_atlantic_max_wind_speed_all.png'), dpi=output_dpi) plt.show(); # - 1945 Onward for North Atlantic fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(figsize_width, figsize_height/2)) ax = axs[0] allyr_avgmaxwind.loc[1945:, 'North Atlantic'].plot(ax=ax, color=color_dict['North Atlantic']) ax.set_ylim(0, 120) ax.grid(True, color='grey', alpha=0.6, linestyle='--') ax.set_title('') ax.set_ylabel('Average Max Wind Speed (mph)') ax.set_xlabel('') ax = axs[1] allyr_counts.loc[1945:, 'North Atlantic'].plot(ax=ax, kind='bar', label='Count', color=color_dict['North Atlantic']) disp_mult = 10 ax.xaxis.set_major_locator(plt.MultipleLocator(disp_mult)) ax.xaxis.set_ticklabels([1945] + list(range(1945, 2015, disp_mult)), rotation=0) ax.set_title('') ax.set_ylabel('# of Data Points') ax.set_xlabel('') fig.suptitle('North Atlantic (since 1945)', fontweight='bold') fig.savefig(os.path.join(media_dir, 'north_atlantic_max_wind_speed_1945.png'), dpi=output_dpi) plt.show(); # - Tack on IsHurricane Columns to Regional-Yearly data def _classify_helper(storm_name): 
"""Helper function to classify 'hurricanes'""" ret = False designations = ['hurricane', 'typhoon'] storm_name = storm_name.lower() all_words = storm_name.split(' ') for designation in designations: ret |= (designation in all_words) return ret regyr_wclass = region_yearly_df.copy() is_hurricane = np.zeros((regyr_wclass.shape[0], 1)) for row_id, vals in enumerate(regyr_wclass.values): is_hurricane[row_id] = 1 if _classify_helper(vals[1]) else 0 regyr_wclass['IsHurricane'] = regyr_wclass.loc[:, 'Max Winds'] >= 75.0 # - 1945 Onward for North Atlantic (Hurricanes Only) hurricane_data = regyr_wclass.loc[regyr_wclass.loc[:, 'IsHurricane'] == 1] \ .drop('IsHurricane', axis=1) hurricane_years = hurricane_data.loc[:, ['Start Date']].set_index('Start Date').index regavg_hurricanes = hurricane_data.groupby(['Region', hurricane_years.year], axis=0) \ .mean().loc[:, 'Max Winds'].unstack('Region') regcnt_hurricanes = hurricane_data.groupby(['Region', hurricane_years.year], axis=0) \ .count().loc[:, 'Max Winds'].unstack('Region') # -- Plot fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(figsize_width, figsize_height/2)) ax = axs[0] regavg_hurricanes.loc[1945:, 'North Atlantic'].plot(ax=ax, color=color_dict['North Atlantic']) ax.set_ylim(0, 140) ax.grid(True, color='grey', alpha=0.6, linestyle='--') ax.set_title('') ax.set_ylabel('Average Max Wind Speed (mph)') ax.set_xlabel('') ax = axs[1] regcnt_hurricanes.loc[1945:, 'North Atlantic'].plot(ax=ax, kind='bar', label='Count', color=color_dict['North Atlantic']) disp_mult = 10 ax.xaxis.set_major_locator(plt.MultipleLocator(disp_mult)) ax.xaxis.set_ticklabels([1945] + list(range(1945, 2015, disp_mult)), rotation=0) ax.set_title('') ax.set_ylabel('# of Data Points') ax.set_xlabel('') fig.suptitle('North Atlantic Hurricanes', fontweight='bold') fig.savefig(os.path.join(media_dir, 'north_atlantic_hurricanes_max_wind_speed_1945.png'), dpi=output_dpi) plt.show(); # - 1945 Onward for North Atlantic (Hurricanes Only) non_hurr_data = 
regyr_wclass.loc[regyr_wclass.loc[:, 'IsHurricane'] == 0] \ .drop('IsHurricane', axis=1) non_hurr_years = non_hurr_data.loc[:, ['Start Date']].set_index('Start Date').index regavg_nonhurrs = non_hurr_data.groupby(['Region', non_hurr_years.year], axis=0) \ .mean().loc[:, 'Max Winds'].unstack('Region') regcnt_nonhurrs = non_hurr_data.groupby(['Region', non_hurr_years.year], axis=0) \ .count().loc[:, 'Max Winds'].unstack('Region') # -- Plot fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(figsize_width, figsize_height/2)) ax = axs[0] regavg_nonhurrs.loc[1945:, 'North Atlantic'].plot(ax=ax, color=color_dict['North Atlantic']) ax.set_ylim(0, 140) ax.grid(True, color='grey', alpha=0.6, linestyle='--') ax.set_title('') ax.set_ylabel('Average Max Wind Speed (mph)') ax.set_xlabel('') ax = axs[1] regcnt_nonhurrs.loc[1945:, 'North Atlantic'].plot(ax=ax, kind='bar', label='Count', color=color_dict['North Atlantic']) disp_mult = 10 ax.xaxis.set_major_locator(plt.MultipleLocator(disp_mult)) ax.xaxis.set_ticklabels([1945] + list(range(1945, 2015, disp_mult)), rotation=0) ax.set_title('') ax.set_ylabel('# of Data Points') ax.set_xlabel('') fig.suptitle('North Atlantic Non-Hurricanes', fontweight='bold') fig.savefig(os.path.join(media_dir, 'north_atlantic_non_hurricanes_max_wind_speed_1945.png'), dpi=output_dpi) plt.show(); # - Hurricanes vs. 
Non-hurricanes by Region hurr_prop = (regcnt_hurricanes.fillna(0) / (regcnt_nonhurrs + regcnt_hurricanes.fillna(0))) # -- Plot fig, axs = plt.subplots(nrows=4, ncols=3, figsize=(figsize_width, figsize_width/1.3)) for i_reg in range(len(regions)): reg = regions[i_reg] ax = axs[i_reg][0] regavg_hurricanes.loc[1945:, reg].plot(ax=ax, color=color_dict[reg]) ax.set_ylim((0, 150)) ax.set_xlim((1944, 2016)) ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_ylabel(reg) ax.set_xlabel('') ax = axs[i_reg][1] regavg_nonhurrs.loc[1945:, reg].plot(ax=ax, color=color_dict[reg]) ax.set_ylim((0, 150)) ax.set_xlim((1944, 2016)) ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_ylabel('') ax.set_xlabel('') ax = axs[i_reg][2] (hurr_prop * 100.).loc[1945:, reg].plot(ax=ax, color=color_dict[reg]) ax.grid(linestyle='--', color='grey', alpha=0.5) ax.set_ylim(0, 100) ax.set_xlabel('') axs[0][0].set_title('Hurricanes') axs[0][1].set_title('Non-Hurricanes') axs[0][2].set_title('Proportion Hurricanes (%)') fig.suptitle('Hurricanes and Non-hurricanes by Region', fontweight='bold', va='top') fig.savefig(os.path.join(media_dir, 'hurr_vs_non_hurr_stats_region.png'), dpi=output_dpi) plt.show(); ```
github_jupyter
# Setup initial *O slabs to run --- # Import Modules ``` import os print(os.getcwd()) import sys import json import pickle from shutil import copyfile import numpy as np import pandas as pd from ase import io from tqdm.notebook import tqdm from IPython.display import display # ######################################################### from methods import ( get_df_slab, get_df_jobs, ) from proj_data import metal_atom_symbol # ######################################################### from dft_workflow_methods import ( get_job_spec_dft_params, get_job_spec_scheduler_params, submit_job, calc_wall_time) ``` # Script Inputs ``` # Slac queue to submit to slac_sub_queue = "suncat3" # 'suncat', 'suncat2', 'suncat3' # COMPENV to submit to # compenv_i = "slac" # compenv_i = "sherlock" compenv_i = "nersc" ``` # Read Data ``` # ######################################################### df_slab = get_df_slab() df_slab = df_slab.set_index("slab_id") df_slab_i = df_slab # ######################################################### df_jobs = get_df_jobs() ``` ### Read `df_slabs_to_run` from `create_slabs.ipynb`, used to mark priority slabs ``` directory = os.path.join( os.environ["PROJ_irox_oer"], "workflow/creating_slabs", "out_data") # ######################################################### import pickle; import os path_i = os.path.join( directory, "df_slabs_to_run.pickle") with open(path_i, "rb") as fle: df_slabs_to_run = pickle.load(fle) # ######################################################### indices_not_good = [] for i_cnt, row_i in df_slabs_to_run.iterrows(): df = df_slab_i df = df[ (df["bulk_id"] == row_i.bulk_id) & (df["facet"] == row_i.facet_str) & [True for i in range(len(df))] ] if df.shape[0] == 0: indices_not_good.append(i_cnt) df_slabs_to_run.loc[ indices_not_good ] ``` # Selecting Slabs to Run ``` # Dropping slabs that have been previously done df_jobs_i = df_jobs[df_jobs.ads == "o"] df_slab_i = df_slab_i.drop( df_jobs_i.slab_id.unique() ) # Doing only phase 2 
slabs for now df_slab_i = df_slab_i[df_slab_i.phase == 2] # ######################################################### # Selecting smallest slabs # df_slab_i = df_slab_i[df_slab_i.num_atoms < 80] # print("Just doing XRD facets for now") # df_slab_i = df_slab_i[df_slab_i.source == "xrd"] ``` ### Filtering down to best slabs, no layered, all octahedra, 0.3 eV/atom above hull cutoff ``` good_slabs = [] for slab_id_i, row_i in df_slab_i.iterrows(): # #################################################### bulk_id_i = row_i.bulk_id facet_i = row_i.facet # #################################################### # print("") # print(bulk_id_i, slab_id_i) df = df_slabs_to_run df = df[ (df["bulk_id"] == bulk_id_i) & (df["facet_str"] == facet_i) & [True for i in range(len(df))] ] if df.shape[0] > 0: # print("Good") good_slabs.append(slab_id_i) # elif df.shape[0] == 0: # print("Bad") df_slab_i = df_slab_i.loc[ good_slabs ] df = df_slab_i df = df[ (df["num_atoms"] <= 100) & # (df[""] == "") & # (df[""] == "") & [True for i in range(len(df))] ] df_slab_i = df df_slab_i = df_slab_i.sort_values("num_atoms", ascending=False) df_slab_i df_slab_i.index.tolist() df_slab_i = df_slab_i.loc[ [ 'legofufi_61', 'gekawore_16', 'mitilaru_63', # 'winomuvi_99', # 'letapivu_80', # 'giworuge_14', # 'lirilapa_78', # 'wakidowo_59', # 'kererape_22', # 'nekelele_74', # 'pebitiru_79', ] ] df_slab_i assert False ``` # Setting up the job folders ``` data_dict_list = [] for i_cnt, row_i in df_slab_i.iterrows(): data_dict_i = dict() # ##################################################### slab_id = row_i.name bulk_id = row_i.bulk_id facet = row_i.facet slab_final = row_i.slab_final num_atoms = row_i.num_atoms loop_time = row_i.loop_time iter_time_i = row_i.iter_time_i # ##################################################### attempt = 1 rev = 1 # Checking if job dir exists for other comp. envs. 
(it shouldn't) job_exists_in_another_compenv = False path_already_exists = False for compenv_j in ["slac", "sherlock", "nersc", ]: path_j = os.path.join( os.environ["PROJ_irox_oer_gdrive"], "dft_workflow/run_slabs/run_o_covered/out_data/dft_jobs", compenv_j, bulk_id, facet, str(attempt).zfill(2) + "_attempt", "_" + str(rev).zfill(2) ) if os.path.exists(path_j) and compenv_j == compenv_i: path_already_exists = True print("This path already exists", path_j) elif os.path.exists(path_j): job_exists_in_another_compenv = True print("Job exists in another COMPENV", path_j) good_to_go = True if job_exists_in_another_compenv: good_to_go = False if path_already_exists: good_to_go = False if good_to_go: path_i = os.path.join( os.environ["PROJ_irox_oer_gdrive"], "dft_workflow/run_slabs/run_o_covered/out_data/dft_jobs", compenv_i, bulk_id, facet, str(attempt).zfill(2) + "_attempt", "_" + str(rev).zfill(2) ) print(path_i) if os.path.exists(path_i): print("TEMP | This path already exists and it shouldn't", path_i) if not os.path.exists(path_i): os.makedirs(path_i) # ##################################################### # Copy dft script to job folder # ##################################################### copyfile( os.path.join( os.environ["PROJ_irox_oer"], "dft_workflow/dft_scripts/slab_dft.py" ), os.path.join( path_i, "model.py", ), ) copyfile( os.path.join( os.environ["PROJ_irox_oer"], "dft_workflow/dft_scripts/slab_dft.py" ), os.path.join( path_i, "slab_dft.py", ), ) # ##################################################### # Copy atoms object to job folder # ##################################################### slab_final.write( os.path.join(path_i, "init.traj") ) # ##################################################### data_dict_i["slab_id"] = slab_id data_dict_i["bulk_id"] = bulk_id data_dict_i["facet"] = facet data_dict_i["slab_final"] = slab_final data_dict_i["num_atoms"] = num_atoms data_dict_i["attempt"] = attempt data_dict_i["rev"] = rev data_dict_i["path_i"] = path_i # 
##################################################### data_dict_list.append(data_dict_i) # ##################################################### # ######################################################### df_jobs_new = pd.DataFrame(data_dict_list) df_jobs_new = df_jobs_new.set_index("slab_id") # ######################################################### ``` # Assigning job specific DFT parameters ``` data_dict_list = [] for i_cnt, row_i in df_jobs_new.iterrows(): data_dict_i = dict() # ##################################################### slab_id = row_i.name num_atoms = row_i.num_atoms path_i =row_i.path_i # ##################################################### dft_params_dict = get_job_spec_dft_params( compenv=compenv_i, slac_sub_queue="suncat3", ) # ##################################################### data_dict_i["slab_id"] = slab_id data_dict_i["dft_params"] = dft_params_dict # ##################################################### data_dict_list.append(data_dict_i) # ##################################################### df_dft_params = pd.DataFrame(data_dict_list) df_dft_params = df_dft_params.set_index("slab_id") # ######################################################### # Writing DFT params to job directory for slab_id, row_i in df_dft_params.iterrows(): # ##################################################### dft_params = row_i.dft_params # ##################################################### row_slab_i = df_jobs_new.loc[slab_id] path_i = row_slab_i.path_i # ##################################################### with open(os.path.join(path_i, "dft-params.json"), "w+") as fle: json.dump(dft_params, fle, indent=2, skipkeys=True) ``` # Setting initial magnetic moments ``` data_dict_list = [] for i_cnt, row_i in df_jobs_new.iterrows(): # ##################################################### atoms = row_i.slab_final path_i =row_i.path_i # ##################################################### z_positions = atoms.positions[:, 2] z_max = z_positions.max() 
O_magmom=0.2 M_magmom=0.6 magmoms_i = [] for atom in atoms: z_pos = atom.position[2] dist_from_top = z_max - z_pos # print(z_max - z_pos) if dist_from_top < 4: if atom.symbol == "O": magmom_i = O_magmom else: magmom_i = M_magmom magmoms_i.append(magmom_i) else: magmoms_i.append(0.) data_path = os.path.join(path_i, "magmoms.json") with open(data_path, "w") as outfile: json.dump(magmoms_i, outfile, indent=2) print("Paths of new jobs:") tmp = [print(i) for i in df_jobs_new.path_i.tolist()] # ######################################################### print(20 * "# # ") print("All done!") print("setup_dft.ipynb") print(20 * "# # ") # ######################################################### ``` ``` # df_slab_i # assert False # df_slab_i # df_slab_i = df_slab_i.iloc[[0]] # assert False ```
github_jupyter
``` import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import pandas_profiling data=pd.read_csv("/home/manikanta/Documents/ML/classification/Random Forest/hcvdat0.csv") data.head() data.tail() data['Category'].value_counts(normalize=True) data.shape # Import label encoder from sklearn import preprocessing # label_encoder object knows how to understand word labels. label_encoder = preprocessing.LabelEncoder() # Encode labels in column 'species'. data['Category']= label_encoder.fit_transform(data['Category']) data['Category'].unique() data['Sex']= label_encoder.fit_transform(data['Sex']) data['Sex'].unique() data.head() data.tail() import seaborn as sns sns.countplot(x='Category',data=data) import seaborn as sns sns.countplot(x='Sex',data=data) data.columns data.isna().sum() data['ALB'].fillna(data['ALB'].mode()[0], inplace=True) data['ALP'].fillna(data['ALP'].mode()[0], inplace=True) data['ALT'].fillna(data['ALT'].mode()[0], inplace=True) data['CHOL'].fillna(data['CHOL'].mode()[0], inplace=True) data['PROT'].fillna(data['PROT'].mode()[0],inplace=True) data.isna().sum() x=data.drop(['Unnamed: 0', 'Category'],axis=1) y=data['Category'] from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state=0) from sklearn.ensemble import RandomForestClassifier rclf = RandomForestClassifier() rclf.fit(X_train,y_train) train_score=rclf.score(X_train,y_train) train_score test_score=rclf.score(X_test,y_test) test_score from sklearn.metrics import accuracy_score,recall_score,confusion_matrix y_pred = rclf.predict(X_test) print(len(X_test)) print(accuracy_score(y_test,y_pred)) print(confusion_matrix(y_test,y_pred)) y_pred pred_prob=pd.DataFrame({'Acutual_Data':y_test,'New_data':y_pred}) pred_prob from sklearn.metrics import classification_report print(classification_report(y_test, y_pred)) # Applying 10-Fold Cross Validation from sklearn.model_selection import 
cross_val_score scores = cross_val_score(rclf, X_train, y_train, cv = 10, scoring='accuracy') print('Cross-validation scores:{}'.format(scores)) # compute Average cross-validation score print('Average cross-validation score: {:.4f}'.format(scores.mean())) #from sklearn.externals.six import StringIO #from IPython.display import Image #from sklearn.tree import export_graphviz #import pydotplus #dot_data = StringIO() #export_graphviz(rclf, out_file=dot_data, # filled=True, rounded=True, # special_characters=True, feature_names =x,class_names=['0','1','2','3','4']) #graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) #graph.write_png('model/diabetes1.png') #Image(graph.create_png()) #from sklearn import tree #from sklearn.tree import export_graphviz #fn=x #cn=y #fig, axes = plt.subplots(nrows = 615,ncols = 14,figsize = (4,4), dpi=800) #tree.plot_tree(rclf.estimators_[0], # feature_names = fn, # class_names=cn, # filled = True); #fig.savefig('rf_individualtree.png') ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Customizing a Transformer Encoder <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/official_models/nlp/customize_encoder"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/models/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Learning objectives The [TensorFlow Models NLP library](https://github.com/tensorflow/models/tree/master/official/nlp/modeling) is a collection of tools for building and training modern high performance natural language models. 
The [TransformerEncoder](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py) is the core of this library, and lots of new network architectures are proposed to improve the encoder. In this Colab notebook, we will learn how to customize the encoder to employ new network architectures. ## Install and import ### Install the TensorFlow Model Garden pip package * `tf-models-nightly` is the nightly Model Garden package created daily automatically. * `pip` will install all models and dependencies automatically. ``` !pip install -q tf-nightly !pip install -q tf-models-nightly ``` ### Import Tensorflow and other libraries ``` import numpy as np import tensorflow as tf from official.modeling import activations from official.nlp import modeling from official.nlp.modeling import layers, losses, models, networks ``` ## Canonical BERT encoder Before learning how to customize the encoder, let's firstly create a canonical BERT encoder and use it to instantiate a `BertClassifier` for a classification task. ``` cfg = { "vocab_size": 100, "hidden_size": 32, "num_layers": 3, "num_attention_heads": 4, "intermediate_size": 64, "activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "sequence_length": 16, "type_vocab_size": 2, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } bert_encoder = modeling.networks.TransformerEncoder(**cfg) def build_classifier(bert_encoder): return modeling.models.BertClassifier(bert_encoder, num_classes=2) canonical_classifier_model = build_classifier(bert_encoder) ``` `canonical_classifier_model` can be trained using the training data. For details about how to train the model, please see the colab [fine_tuning_bert.ipynb](https://github.com/tensorflow/models/blob/master/official/colab/fine_tuning_bert.ipynb). We skip the code that trains the model here. After training, we can apply the model to do prediction. 
``` def predict(model): batch_size = 3 np.random.seed(0) word_ids = np.random.randint( cfg["vocab_size"], size=(batch_size, cfg["sequence_length"])) mask = np.random.randint(2, size=(batch_size, cfg["sequence_length"])) type_ids = np.random.randint( cfg["type_vocab_size"], size=(batch_size, cfg["sequence_length"])) print(model([word_ids, mask, type_ids], training=False)) predict(canonical_classifier_model) ``` ## Customize BERT encoder One BERT encoder consists of an embedding network and multiple transformer blocks, and each transformer block contains an attention layer and a feedforward layer. We provide easy ways to customize each of those components via (1) [EncoderScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py) and (2) [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py). ### Use EncoderScaffold `EncoderScaffold` allows users to provide a custom embedding subnetwork (which will replace the standard embedding logic) and/or a custom hidden layer class (which will replace the `Transformer` instantiation in the encoder). #### Without Customization Without any customization, `EncoderScaffold` behaves the same as the canonical `TransformerEncoder`. 
As shown in the following example, `EncoderScaffold` can load `TransformerEncoder`'s weights and output the same values: ``` default_hidden_cfg = dict( num_attention_heads=cfg["num_attention_heads"], intermediate_size=cfg["intermediate_size"], intermediate_activation=activations.gelu, dropout_rate=cfg["dropout_rate"], attention_dropout_rate=cfg["attention_dropout_rate"], kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02), ) default_embedding_cfg = dict( vocab_size=cfg["vocab_size"], type_vocab_size=cfg["type_vocab_size"], hidden_size=cfg["hidden_size"], seq_length=cfg["sequence_length"], initializer=tf.keras.initializers.TruncatedNormal(0.02), dropout_rate=cfg["dropout_rate"], max_seq_length=cfg["sequence_length"], ) default_kwargs = dict( hidden_cfg=default_hidden_cfg, embedding_cfg=default_embedding_cfg, num_hidden_instances=cfg["num_layers"], pooled_output_dim=cfg["hidden_size"], return_all_layer_outputs=True, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(0.02), ) encoder_scaffold = modeling.networks.EncoderScaffold(**default_kwargs) classifier_model_from_encoder_scaffold = build_classifier(encoder_scaffold) classifier_model_from_encoder_scaffold.set_weights( canonical_classifier_model.get_weights()) predict(classifier_model_from_encoder_scaffold) ``` #### Customize Embedding Next, we show how to use a customized embedding network. We firstly build an embedding network that will replace the default network. This one will have 2 inputs (`mask` and `word_ids`) instead of 3, and won't use positional embeddings. 
``` word_ids = tf.keras.layers.Input( shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_mask") embedding_layer = modeling.layers.OnDeviceEmbedding( vocab_size=cfg['vocab_size'], embedding_width=cfg['hidden_size'], initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) attention_mask = layers.SelfAttentionMask()([word_embeddings, mask]) new_embedding_network = tf.keras.Model([word_ids, mask], [word_embeddings, attention_mask]) ``` Inspecting `new_embedding_network`, we can see it takes two inputs: `input_word_ids` and `input_mask`. ``` tf.keras.utils.plot_model(new_embedding_network, show_shapes=True, dpi=48) ``` We then can build a new encoder using the above `new_embedding_network`. ``` kwargs = dict(default_kwargs) # Use new embedding network. kwargs['embedding_cls'] = new_embedding_network kwargs['embedding_data'] = embedding_layer.embeddings encoder_with_customized_embedding = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_customized_embedding) # ... Train the model ... print(classifier_model.inputs) # Assert that there are only two inputs. assert len(classifier_model.inputs) == 2 ``` #### Customized Transformer User can also override the [hidden_cls](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py#L103) argument in `EncoderScaffold`'s constructor to employ a customized Transformer layer. See [ReZeroTransformer](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/rezero_transformer.py) for how to implement a customized Transformer layer. Following is an example of using `ReZeroTransformer`: ``` kwargs = dict(default_kwargs) # Use ReZeroTransformer. 
kwargs['hidden_cls'] = modeling.layers.ReZeroTransformer encoder_with_rezero_transformer = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_rezero_transformer) # ... Train the model ... predict(classifier_model) # Assert that the variable `rezero_alpha` from ReZeroTransformer exists. assert 'rezero_alpha' in ''.join([x.name for x in classifier_model.trainable_weights]) ``` ### Use [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py) The above method of customizing `Transformer` requires rewriting the whole `Transformer` layer, while sometimes you may only want to customize either attention layer or feedforward block. In this case, [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py) can be used. #### Customize Attention Layer User can also override the [attention_cls](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py#L45) argument in `TransformerScaffold`'s constructor to employ a customized Attention layer. See [TalkingHeadsAttention](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/talking_heads_attention.py) for how to implement a customized `Attention` layer. Following is an example of using [TalkingHeadsAttention](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/talking_heads_attention.py): ``` # Use TalkingHeadsAttention hidden_cfg = dict(default_hidden_cfg) hidden_cfg['attention_cls'] = modeling.layers.TalkingHeadsAttention kwargs = dict(default_kwargs) kwargs['hidden_cls'] = modeling.layers.TransformerScaffold kwargs['hidden_cfg'] = hidden_cfg encoder = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder) # ... Train the model ... 
predict(classifier_model) # Assert that the variable `pre_softmax_weight` from TalkingHeadsAttention exists. assert 'pre_softmax_weight' in ''.join([x.name for x in classifier_model.trainable_weights]) ``` #### Customize Feedforward Layer Similarly, one could also customize the feedforward layer. See [GatedFeedforward](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/gated_feedforward.py) for how to implement a customized feedforward layer. Following is an example of using [GatedFeedforward](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/gated_feedforward.py). ``` # Use GatedFeedforward hidden_cfg = dict(default_hidden_cfg) hidden_cfg['feedforward_cls'] = modeling.layers.GatedFeedforward kwargs = dict(default_kwargs) kwargs['hidden_cls'] = modeling.layers.TransformerScaffold kwargs['hidden_cfg'] = hidden_cfg encoder_with_gated_feedforward = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_gated_feedforward) # ... Train the model ... predict(classifier_model) # Assert that the variable `gate` from GatedFeedforward exists. assert 'gate' in ''.join([x.name for x in classifier_model.trainable_weights]) ``` ### Build a new Encoder using building blocks from KerasBERT. Finally, you could also build a new encoder using building blocks in the modeling library. See [AlbertTransformerEncoder](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/albert_transformer_encoder.py) as an example: ``` albert_encoder = modeling.networks.AlbertTransformerEncoder(**cfg) classifier_model = build_classifier(albert_encoder) # ... Train the model ... predict(classifier_model) ``` Inspecting the `albert_encoder`, we see it stacks the same `Transformer` layer multiple times. ``` tf.keras.utils.plot_model(albert_encoder, show_shapes=True, dpi=48) ```
github_jupyter
# Concolic Fuzzing We have previously seen how one can use dynamic taints to produce more intelligent test cases than simply looking for program crashes. We have also seen how one can use the taints to update the grammar, and hence focus more on the dangerous methods. While taints are helpful, uninterpreted strings are only one of the attack vectors. Can we say anything more about the properties of variables at any point in the execution? For example, can we say for sure that a function will always receive the buffers with the correct length? Concolic execution offers a solution. The idea of _concolic execution_ over a function is as follows: We start with a sample input for the function, and execute the function under trace. At each point the execution passes through a conditional, we save the conditional encountered in the form of relations between symbolic variables. (A _symbolic variable_ can be thought of as a sort of placeholder for the real variable, sort of like the x in solving for x in Algebra. The symbolic variables can be used to specify relations without actually solving them.) With concolic execution, one can collect the constraints that an execution path encounters, and use it to answer questions about the program behavior at any point we prefer along the program execution path. We can further use concolic execution to enhance fuzzing. In this chapter, we explore in depth how to execute a Python function concolically, and how concolic execution can be used to enhance fuzzing. **Prerequisites** * You should have read the [chapter on coverage](Coverage.ipynb). * You should have read the [chapter on information flow](InformationFlow.ipynb). * A familiarity with the basic idea of [SMT solvers](https://en.wikipedia.org/wiki/Satisfiability_modulo_theories) would be useful. We first set up our infrastructure so that we can make use of previously defined functions. ``` import bookutils ``` ## Synopsis <!-- Automatically generated. Do not edit. 
--> To [use the code provided in this chapter](Importing.ipynb), write ```python >>> from fuzzingbook.ConcolicFuzzer import <identifier> ``` and then make use of the following features. This chapter defines two main classes: `SimpleConcolicFuzzer` and `ConcolicGrammarFuzzer`. The `SimpleConcolicFuzzer` first uses a sample input to collect predicates encountered. The fuzzer then negates random predicates to generate new input constraints. These, when solved, produce inputs that explore paths that are close to the original path. It can be used as follows. We first obtain the constraints using `ConcolicTracer`. ```python >>> with ConcolicTracer() as _: >>> _[cgi_decode]('a%20d') ``` These constraints are added to the concolic fuzzer as follows: ```python >>> scf = SimpleConcolicFuzzer() >>> scf.add_trace(_, 'a%20d') ``` The concolic fuzzer then uses the constraints added to guide its fuzzing as follows: ```python >>> scf = SimpleConcolicFuzzer() >>> for i in range(10): >>> v = scf.fuzz() >>> if v is None: >>> break >>> print(repr(v)) >>> with ExpectError(): >>> with ConcolicTracer() as _: >>> _[cgi_decode](v) >>> scf.add_trace(_, v) ' ' '%\\x00' '%\\x008\\x00' '%\\x00f\\x00' Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in 
<module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) '%\\x008\\x00' '%b\\x00' '+\\x00' Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) '%b7\\x00' '%1\\x00' '%4\\x00' Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 39, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) Traceback (most recent call last): File "<ipython-input-287-2a3454213b54>", line 9, in <module> _[cgi_decode](v) File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__ self.result = self.fn(*self.concolic(args)) File 
"<ipython-input-241-630ee123bed8>", line 42, in cgi_decode raise ValueError("Invalid encoding") ValueError: Invalid encoding (expected) ``` The `SimpleConcolicFuzzer` simply explores all paths near the original path traversed by the sample input. It uses a simple mechanism to explore the paths that are near the paths that it knows about, and other than code paths, knows nothing about the input. The `ConcolicGrammarFuzzer` on the other hand, knows about the input grammar, and can collect feedback from the subject under fuzzing. It can lift some of the constraints encountered to the grammar, enabling deeper fuzzing. It is used as follows: ```python >>> from InformationFlow import INVENTORY_GRAMMAR, SQLException >>> cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR) >>> cgf.prune_tokens(prune_tokens) >>> for i in range(10): >>> query = cgf.fuzz() >>> print(query) >>> with ConcolicTracer() as _: >>> with ExpectError(): >>> try: >>> res = _[db_select](query) >>> print(repr(res)) >>> except SQLException as e: >>> print(e) >>> cgf.update_grammar(_) >>> print() update R7 set J91=X8 where k*Z<c-p*e Table ('R7') was not found insert into qS2TWV (j) values ('w$w') Table ('qS2TWV') was not found delete from months where F*m*_*B/N<g+M(a)-o-_ Invalid WHERE ('F*m*_*B/N<g+M(a)-o-_') update e set e=:2A where 8.5!=-3 Table ('e') was not found delete from f28Z1 where R(-7.6)>Is3e(24)>M(C==b,l,E)*(23) Table ('f28Z1') was not found update fO8 set h=v,n=h,o=J where Z==((A)) Table ('fO8') was not found update vehicles set X=k01Pd where (((V81)))!=92.1 Column ('X') was not found select hI64i-wJ>B(WK5,m) from g where 6.0>84 Table ('g') was not found delete from f9l where B68(F)!=.*Q-P Table ('f9l') was not found select c/A!=Q==L==6,EIO(u<c,o,T) from bGs4 Table ('bGs4') was not found ``` ## Tracking Constraints In the chapter on [information flow](InformationFlow), we have seen how dynamic taints can be used to direct fuzzing by indicating which part of input reached interesting places. 
def factorial(n):
    """Return n! for n >= 0; None for negative inputs (running example of this chapter)."""
    if n < 0:
        return None
    if n == 0:
        return 1
    if n == 1:
        return 1
    result = 1
    while n != 0:
        result = result * n
        n = n - 1
    return result

from Coverage import Coverage
import inspect

class ArcCoverage(Coverage):
    """Coverage subclass that records (function, lineno) events and exposes line arcs."""

    def traceit(self, frame, event, args):
        # Record every event except returns; returning self.traceit keeps tracing on.
        if event != 'return':
            info = inspect.getframeinfo(frame)
            self._trace.append((info.function, info.lineno))
        return self.traceit

    def arcs(self):
        """Return consecutive line-number pairs traversed during the trace."""
        linenos = [lineno for _, lineno in self._trace]
        return list(zip(linenos, linenos[1:]))

# Trace a run of factorial(5); `cov` is inspected in the following cells.
with ArcCoverage() as cov:
    factorial(5)
``` lines = [i[1] for i in cov._trace if i[0] == 'factorial'] src = {i + 1: s for i, s in enumerate( inspect.getsource(factorial).split('\n'))} ``` * The line (1) is simply the entry point of the function. We know that the input is `n`, which is an integer. ``` src[1] ``` * The line (2) is a predicate `n < 0`. Since the next line taken is line (4), we know that at this point in the execution path, the predicate was `false`. ``` src[2], src[3], src[4] ``` We notice that this is one of the predicates where the `true` branch was not taken. How do we generate a value that takes the `true` branch here? One way is to use symbolic variables to represent the input, encode the constraint, and use an *SMT Solver* to solve the negation of the constraint. As we mentioned in the introduction to the chapter, a symbolic variable can be thought of as a sort of placeholder for the real variable, sort of like the `x` in solving for `x` in Algebra. These variables can be used to encode constraints placed on the variables in the program. We identify what constraints the variable is supposed to obey, and finally produce a value that obeys all constraints imposed. ## SMT Solvers To solve these constraints, one can use a _Satisfiability Modulo Theories_ (SMT) solver. An SMT solver is built on top of a _SATISFIABILITY_ (SAT) solver. A SAT solver is being used to check whether boolean formulas in first order logic (e.g `(a | b ) & (~a | ~b)`) can be satisfied using any assignments for the variables (e.g `a = true, b = false`). An SMT solver extends these SAT solvers to specific background theories -- for example, _theory of integers_, or _theory of strings_. 
That is, given a string constraint expressed as a formula with string variables (e.g `h + t == 'hello,world'`), an SMT solver that understands _theory of strings_ can be used to check if that constraint can be satisfied, and if satisfiable, provide an instantiation of concrete values for the variables used in the formula (e.g `h = 'hello,', t = 'world'`). We use the SMT solver, `Z3` in this chapter. ``` import z3 ``` To ensure that the string constraints we use in this chapter are successfully evaluated, we need to specify the `z3str3` solver. Further, we set the timeout for Z3 computations to to 30 seconds. ``` assert z3.get_version() >= (4, 8, 6, 0) z3.set_option('smt.string_solver', 'z3str3') z3.set_option('timeout', 30 * 1000) # milliseconds ``` Encoding the constraint requires declaring a corresponding symbolic variable to the input `n`. ``` zn = z3.Int('n') ``` Remember the constraint `(n < 0)` from line 2 in `factorial()`? We can now encode the constraint as follows. ``` zn < 0 ``` We previously traced `factorial(5)`. We saw that with input `5`, the execution took the `else` branch on the predicate `n < 0`. We can express this observation as follows. ``` z3.Not(zn < 0) ``` The `z3.solve()` method can also be used to check if the constraints are satisfiable, and if they are, provide values for variables such that the constraints are satisfied. For example, we can ask z3 for an input that will take the `else` branch as follows: ``` z3.solve(z3.Not(zn < 0)) ``` This is *a solution* (albeit a trivial one). SMT solvers can be used to solve much harder problems. For example, here is how one can solve a quadratic equation. ``` x = z3.Real('x') eqn = (2 * x**2 - 11 * x + 5 == 0) z3.solve(eqn) ``` Again, this is _one solution_. We can ask z3 to give us another solution as follows. 
``` z3.solve(x != 5, eqn) ``` Indeed, both `x = 5` and `x = 1/2` are solutions to the quadratic equation $ 2x^2 -11x + 5 = 0 $ Similarly, we can ask *Z3* for an input that satisfies the constraint encoded in line 2 of `factorial()` so that we take the `if` branch. ``` z3.solve(zn < 0) ``` That is, if one uses `-1` as an input to `factorial()`, it is guaranteed to take the `if` branch in line 2 during execution. Let us try using that with our coverage. Here, the `-1` is the solution from above. ``` with cov as cov: factorial(-1) to_graph(gen_cfg(inspect.getsource(factorial)), arcs=cov.arcs()) ``` Ok, so we have managed to cover a little more of the graph. Let us continue with our original input of `factorial(5)`: * In line (4) we encounter a new predicate `n == 0`, for which we again took the false branch. ``` src[4] ``` The predicates required, to follow the path until this point are as follows. ``` predicates = [z3.Not(zn < 0), z3.Not(zn == 0)] ``` * If we continue to line (6), we encounter another predicate, for which again, we took the `false` branch ``` src[6] ``` The predicates encountered so far are as follows ``` predicates = [z3.Not(zn < 0), z3.Not(zn == 0), z3.Not(zn == 1)] ``` To take the branch at (6), we essentially have to obey the predicates until that point, but invert the last predicate. ``` last = len(predicates) - 1 z3.solve(predicates[0:-1] + [z3.Not(predicates[-1])]) ``` What we are doing here is tracing the execution corresponding to a particular input `factorial(5)`, using concrete values, and along with it, keeping *symbolic shadow variables* that enable us to capture the constraints. As we mentioned in the introduction, this particular method of execution where one tracks concrete execution using symbolic variables is called *Concolic Execution*. How do we automate this process? 
class ConcolicTracer:
    """Context manager that tracks symbolic declarations and path conditions.

    The context is a pair ``(decls, path)``: a mapping from symbolic
    variable names to their sorts, and the list of path predicates
    collected while the traced function runs.
    """

    def __init__(self, context=None):
        self.context = context if context is not None else ({}, [])
        self.decls, self.path = self.context

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        return

    def __getitem__(self, fn):
        # Remember the function to trace and the names of its parameters.
        self.fn = fn
        self.fn_args = {name: None for name in inspect.signature(fn).parameters}
        return self

    def __call__(self, *args):
        # Wrap the concrete arguments, run the function, keep its result.
        self.result = self.fn(*self.concolic(args))
        return self.result

    def concolic(self, args):
        # Transparent for now; replaced later by proxy-object creation.
        return args
def zproxy_create(cls, sname, z3var, context, zn, v=None):
    """Instantiate proxy class `cls` and register symbolic name `zn` of sort `sname`."""
    zv = cls(context, z3var(zn), v)
    context[0][zn] = sname
    return zv


class zbool:
    """Concolic proxy for booleans: symbolic expression `z` next to concrete value `v`."""

    @classmethod
    def create(cls, context, zn, v):
        # Fresh symbolic Bool registered under the name `zn`.
        return zproxy_create(cls, 'Bool', z3.Bool, context, zn, v)

    def __init__(self, context, z, v=None):
        self.context, self.z, self.v = context, z, v
        self.decl, self.path = self.context

    def __not__(self):
        # Negate both the symbolic and the concrete side in lockstep.
        return zbool(self.context, z3.Not(self.z), not self.v)
class zbool(zbool):
    def __bool__(self):
        # Record the direction actually taken on the path, then take it.
        if self.v:
            taken, pred = True, self.z
        else:
            taken, pred = False, z3.Not(self.z)
        self.path.append(pred)
        return taken


class zint(int):
    """Concolic proxy for int: a z3 expression `z` carried next to the concrete `v`."""

    def __new__(cls, context, zn, v, *args, **kw):
        # The int payload itself is the concrete value.
        return int.__new__(cls, v, *args, **kw)

    @classmethod
    def create(cls, context, zn, v=None):
        # Fresh symbolic Int registered under the name `zn`.
        return zproxy_create(cls, 'Int', z3.Int, context, zn, v)

    def __init__(self, context, z, v=None):
        self.z, self.v = z, v
        self.context = context

    def __int__(self):
        return self.v

    def __pos__(self):
        return self.v
class zint(zint):
    def _zv(self, o):
        """Return (symbolic, concrete) for `o`, wrapping plain ints as z3 constants."""
        return (o.z, o.v) if isinstance(o, zint) else (z3.IntVal(o), o)

    def __ne__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z != z, self.v != v)

    def __eq__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z == z, self.v == v)

    def __req__(self, other):
        # Reflected equality: delegate to __eq__.
        return self.__eq__(other)

    def __lt__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z < z, self.v < v)

    def __gt__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z > z, self.v > v)

    def __le__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, z3.Or(self.z < z, self.z == z),
                     self.v < v or self.v == v)

    def __ge__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, z3.Or(self.z > z, self.z == z),
                     self.v > v or self.v == v)

    def __bool__(self):
        # Returning zbool(self.context, self.z, self.v) is not allowed here:
        # Python insists on a primitive bool.  The comparison below registers
        # the branch condition on the path as a side effect.
        return True if self != 0 else False


# Binary int operators proxied below; the commented-out ones are not directly
# available on z3.ArithRef and would need separate implementations.
INT_BINARY_OPS = [
    '__add__', '__sub__', '__mul__', '__truediv__',
    # '__div__',
    '__mod__',
    # '__divmod__',
    '__pow__',
    # '__lshift__', '__rshift__', '__and__', '__xor__', '__or__',
    '__radd__', '__rsub__', '__rmul__', '__rtruediv__',
    # '__rdiv__',
    '__rmod__',
    # '__rdivmod__',
    '__rpow__',
    # '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__',
]


def make_int_binary_wrapper(fname, fun, zfun):
    """Build a zint method applying concrete `fun` and symbolic `zfun` in lockstep."""
    def proxy(self, other):
        z, v = self._zv(other)
        z_ = zfun(self.z, z)
        v_ = fun(self.v, v)
        if isinstance(v_, float):
            # we do not implement float results yet; only accept exact ones.
            assert round(v_) == v_
            v_ = round(v_)
        return zint(self.context, z_, v_)
    return proxy


INITIALIZER_LIST = []


def initialize():
    """Run all registered initializers (re-installs the operator proxies)."""
    for fn in INITIALIZER_LIST:
        fn()


def init_concolic_1():
    """Install the binary arithmetic proxies on zint."""
    for fname in INT_BINARY_OPS:
        fun = getattr(int, fname)
        zfun = getattr(z3.ArithRef, fname)
        setattr(zint, fname, make_int_binary_wrapper(fname, fun, zfun))


INITIALIZER_LIST.append(init_concolic_1)
init_concolic_1()


INT_UNARY_OPS = [
    '__neg__', '__pos__',
    # '__abs__', '__invert__', '__round__', '__ceil__', '__floor__', '__trunc__',
]


def make_int_unary_wrapper(fname, fun, zfun):
    """Build a zint unary method applying `fun`/`zfun` to the concrete/symbolic side."""
    def proxy(self):
        return zint(self.context, zfun(self.z), fun(self.v))
    return proxy


def init_concolic_2():
    """Install the unary arithmetic proxies on zint."""
    for fname in INT_UNARY_OPS:
        fun = getattr(int, fname)
        zfun = getattr(z3.ArithRef, fname)
        setattr(zint, fname, make_int_unary_wrapper(fname, fun, zfun))


INITIALIZER_LIST.append(init_concolic_2)
init_concolic_2()
class ConcolicTracer(ConcolicTracer):
    def smt_expr(self, show_decl=False, simplify=False, path=None):
        """Render the collected constraints in SMT-LIB format.

        show_decl: prepend (declare-const ...) lines for all symbolic variables.
        simplify:  run z3.simplify() on the conjoined path first.
        path:      predicates to render; falls back to the tracer's own path.
                   (Default changed from a mutable `[]` to None; both are
                   falsy, so behavior is identical.)
        Returns '' when there are no predicates.
        """
        decls = []
        if show_decl:
            for name in self.decls:
                sort = self.decls[name]
                # 'BitVec' is stored without its width; spell it out here.
                sort = '(_ BitVec 8)' if sort == 'BitVec' else sort
                decls.append("(declare-const %s %s)" % (name, sort))
        path = path if path else self.path
        if not path:
            return ''
        conj = z3.And(path)
        if not show_decl:
            # Without declarations the expression is always simplified.
            return z3.simplify(conj).sexpr()
        body = z3.simplify(conj) if simplify else conj
        return '\n'.join([*decls, "(assert %s)" % body.sexpr()])


def triangle(a, b, c):
    """Classify a triangle by side lengths: 'equilateral', 'isosceles' or 'scalene'.

    Branch structure is kept exactly as in the text: the order of the
    comparisons determines the concolic path conditions collected.
    """
    if a == b:
        if b == c:
            return 'equilateral'
        else:
            return 'isosceles'
    else:
        if b == c:
            return 'isosceles'
        else:
            if a == c:
                return 'isosceles'
            else:
                return 'scalene'
COUNTER = 0


def fresh_name():
    """Return a fresh integer suffix for naming symbolic variables."""
    global COUNTER
    COUNTER += 1
    return COUNTER


def reset_counter():
    """Restart fresh_name() numbering (called on entering a tracer context)."""
    global COUNTER
    COUNTER = 0


class ConcolicTracer(ConcolicTracer):
    def __enter__(self):
        # Fresh numbering per traced run keeps variable names reproducible.
        reset_counter()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        return


class ConcolicTracer(ConcolicTracer):
    def zeval(self, python=False, log=False):
        """Solve the collected path constraints.

        Returns ('sat', {arg_name: value}) on success, or a status string
        with None otherwise.  `python` selects the z3 Python API over the
        command-line solver; `log` prints the solver interaction.
        """
        r, sol = (zeval_py if python else zeval_smt)(self.path, self, log)
        if r == 'sat':
            return r, {k: sol.get(self.fn_args[k], None) for k in self.fn_args}
        else:
            return r, None


def zeval_py(path, cc, log):
    """Solve `path` with the z3 Python API; returns (status, model-dict)."""
    # NOTE(review): these exec'd declarations live only inside exec() and are
    # unused afterwards; kept for parity with the original.  The z3 variables
    # inside `path` already carry their sorts.
    for decl in cc.decls:
        if cc.decls[decl] == 'BitVec':
            v = "z3.%s('%s', 8)" % (cc.decls[decl], decl)
        else:
            v = "z3.%s('%s')" % (cc.decls[decl], decl)
        exec(v)
    s = z3.Solver()
    s.add(z3.And(path))
    # Call check() exactly once: each call re-runs the (possibly expensive)
    # solver; the original called it up to three times.
    res = s.check()
    if res == z3.unsat:
        return 'No Solutions', {}
    elif res == z3.unknown:
        return 'Gave up', None
    assert res == z3.sat
    m = s.model()
    return 'sat', {d.name(): m[d] for d in m.decls()}
import re
import subprocess
import tempfile

# Tokenizer for S-expressions: brackets, bare tokens, and quoted strings.
# (?x) verbose mode lets us lay the pattern out across lines.
SEXPR_TOKEN = r'''(?mx)
    \s*(?:
        (?P<bra>\()|
        (?P<ket>\))|
        (?P<token>[^"()\s]+)|
        (?P<string>"[^"]*")
    )'''


def parse_sexp(sexp):
    """Parse an S-expression string into nested Python lists of tokens."""
    stack, res = [], []
    for match in re.finditer(SEXPR_TOKEN, sexp):
        kind, value = [(t, v) for t, v in match.groupdict().items() if v][0]
        if kind == 'bra':
            # Open a new nesting level.
            stack.append(res)
            res = []
        elif kind == 'ket':
            # Close the current level and append it to its parent.
            inner, res = res, stack.pop(-1)
            res.append(inner)
        elif kind == 'token':
            res.append(value)
        elif kind == 'string':
            res.append(value[1:-1])  # strip the surrounding quotes
        else:
            assert False
    return res


def zeval_smt(path, cc, log):
    """Solve `path` via the z3 command line; returns (status, model-dict)."""
    s = cc.smt_expr(True, True, path)
    with tempfile.NamedTemporaryFile(mode='w', suffix='.smt') as f:
        f.write(s)
        f.write("\n(check-sat)")
        f.write("\n(get-model)")
        f.flush()
        if log:
            print(s, '(check-sat)', '(get-model)', sep='\n')
        # Invoke z3 while the temp file still exists (deleted on close).
        output = subprocess.getoutput("z3 -t:60 " + f.name)
    if log:
        print(output)
    o = parse_sexp(output)
    if not o:
        return 'Gave up', None
    kind = o[0]
    if kind == 'unknown':
        return 'Gave up', None
    elif kind == 'unsat':
        return 'No Solutions', {}
    assert kind == 'sat'
    assert o[1][0] == 'model'
    return 'sat', {i[1]: (i[-1], i[-2]) for i in o[1][1:]}


class zstr(str):
    """Concolic proxy for str; the str payload is the concrete value."""

    def __new__(cls, context, zn, v):
        return str.__new__(cls, v)
class zstr(zstr):
    """Symbolic string proxy: z3 expression `z`, concrete value `v`."""

    @classmethod
    def create(cls, context, zn, v=None):
        # Fresh symbolic String registered under the name `zn`.
        return zproxy_create(cls, 'String', z3.String, context, zn, v)

    def __init__(self, context, z, v=None):
        self.context, self.z, self.v = context, z, v
        # Track the length as a concolic int alongside the string itself.
        self._len = zint(context, z3.Length(z), len(v))
        # self.context[1].append(z3.Length(z) == z3.IntVal(len(v)))

    def _zv(self, o):
        """Return (symbolic, concrete) for `o`, wrapping plain strings."""
        return (o.z, o.v) if isinstance(o, zstr) else (z3.StringVal(o), o)


def zord(context, c):
    """Return a fresh 8-bit bitvector constrained to equal the 1-char string `c`.

    The linking predicate is appended to the path; the bitvector plays the
    role of ord(c) on the symbolic side.
    """
    bn = "bitvec_%d" % fresh_name()
    v = z3.BitVec(bn, 8)
    context[0][bn] = 'BitVec'
    context[1].append(z3.Unit(v) == c)
    return v


def zchr(context, i):
    """Return a fresh 1-char symbolic string constrained to the bitvector `i`.

    Inverse of zord(): the returned string plays the role of chr(i).
    """
    sn = 'string_%d' % fresh_name()
    s = z3.String(sn)
    context[0][sn] = 'String'
    context[1].append(z3.And([s == z3.Unit(i), z3.Length(s) == 1]))
    return s


class zstr(zstr):
    def __eq__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z == z, self.v == v)

    def __req__(self, other):
        # Reflected equality: delegate to __eq__.
        return self.__eq__(other)

    def __add__(self, other):
        z, v = self._zv(other)
        return zstr(self.context, self.z + z, self.v + v)

    def __radd__(self, other):
        return self.__add__(other)


def tstr1(s):
    """Demo: does `s` equal 'hello world'?"""
    if s == 'hello world':
        return True
    else:
        return False
class zstr(zstr):
    def __getitem__(self, idx):
        """Indexing and (step-1, non-reversed) slicing with symbolic tracking."""
        if isinstance(idx, slice):
            start, stop, step = idx.indices(len(self.v))
            assert step == 1  # for now
            assert stop >= start  # for now
            rz = z3.SubString(self.z, start, stop - start)
            rv = self.v[idx]
        elif isinstance(idx, int):
            rz = z3.SubString(self.z, idx, 1)
            rv = self.v[idx]
        else:
            assert False  # for now
        return zstr(self.context, rz, rv)

    def __iter__(self):
        return zstr_iterator(self.context, self)


class zstr_iterator():
    """Iterator over a zstr that registers index/length predicates as it runs."""

    def __init__(self, context, zstr):
        self.context = context
        self._zstr = zstr
        self._str_idx = 0
        self._str_max = zstr._len  # a zint, not a plain int

    def __next__(self):
        # The == on a zint registers the loop-exit predicate on the path.
        if self._str_idx == self._str_max:
            raise StopIteration
        c = self._zstr[self._str_idx]
        self._str_idx += 1
        return c

    def __len__(self):
        # BUG FIX: previously returned self._len, an attribute that is never
        # set on the iterator (AttributeError at runtime); the stored length
        # bound is _str_max (a zint, which is an int subclass, so len() works).
        return self._str_max


def tstr2(s):
    """Demo: check fixed characters at positions 0, 1, and 3."""
    if s[0] == 'h' and s[1] == 'e' and s[3] == 'l':
        return True
    else:
        return False
class zstr(zstr):
    def upper(self):
        """Uppercase with symbolic tracking, built character by character.

        For each character, zord()/zchr() tie a fresh bitvector to it, and a
        range predicate on the path records whether the character was in
        'a'..'z' (and thus shifted).  Iteration order matters for the trace;
        the structure is kept exactly as in the text.
        """
        empty = ''
        ne = 'empty_%d' % fresh_name()
        result = zstr.create(self.context, ne, empty)
        self.context[1].append(z3.StringVal(empty) == result.z)
        cdiff = (ord('a') - ord('A'))
        for i in self:
            oz = zord(self.context, i.z)
            uz = zchr(self.context, oz - cdiff)
            rz = z3.And([oz >= ord('a'), oz <= ord('z')])
            ov = ord(i.v)
            uv = chr(ov - cdiff)
            rv = ov >= ord('a') and ov <= ord('z')
            if zbool(self.context, rz, rv):
                i = zstr(self.context, uz, uv)
            else:
                i = zstr(self.context, i.z, i.v)
            result += i
        return result

    def lower(self):
        """Lowercase counterpart of upper(): shift 'A'..'Z' down by cdiff."""
        empty = ''
        ne = 'empty_%d' % fresh_name()
        result = zstr.create(self.context, ne, empty)
        self.context[1].append(z3.StringVal(empty) == result.z)
        cdiff = (ord('a') - ord('A'))
        for i in self:
            oz = zord(self.context, i.z)
            uz = zchr(self.context, oz + cdiff)
            rz = z3.And([oz >= ord('A'), oz <= ord('Z')])
            ov = ord(i.v)
            uv = chr(ov + cdiff)
            rv = ov >= ord('A') and ov <= ord('Z')
            if zbool(self.context, rz, rv):
                i = zstr(self.context, uz, uv)
            else:
                i = zstr(self.context, i.z, i.v)
            result += i
        return result

    def startswith(self, other, beg=0, end=None):
        """Concolic startswith(); `other` may be a tuple of alternatives."""
        assert end is None  # for now
        assert isinstance(beg, int)  # for now
        zb = z3.IntVal(beg)
        others = other if isinstance(other, tuple) else (other, )
        last = False
        for o in others:
            z, v = self._zv(o)
            r = z3.IndexOf(self.z, z, zb)
            # BUG FIX: the concrete check previously ignored `beg`
            # (self.v.startswith(v)); pass it through like the symbolic side.
            last = zbool(self.context, r == zb, self.v.startswith(v, beg))
            if last:
                return last
        return last

    def find(self, other, beg=0, end=None):
        """Concolic find(); returns a zint index (concrete -1 when absent)."""
        assert end is None  # for now
        assert isinstance(beg, int)  # for now
        zb = z3.IntVal(beg)
        z, v = self._zv(other)
        zi = z3.IndexOf(self.z, z, zb)
        vi = self.v.find(v, beg, end)
        return zint(self.context, zi, vi)


def tstr3(s):
    """Demo: does `s` uppercase to 'H'?"""
    if s.upper() == 'H':
        return True
    else:
        return False


def tstr4(s):
    """Demo: does `s` lowercase to 'hello world'?"""
    if s.lower() == 'hello world':
        return True
    else:
        return False


def tstr5(s):
    """Demo: does `s` start with 'hello'?"""
    if s.startswith('hello'):
        return True
    else:
        return False


def tstr6(s):
    """Demo: does `s` contain 'world'?"""
    if s.find('world') != -1:
        return True
    else:
        return False
``` import string class zstr(zstr): def rstrip(self, chars=None): if chars is None: chars = string.whitespace if self._len == 0: return self else: last_idx = self._len - 1 cz = z3.SubString(self.z, last_idx.z, 1) cv = self.v[-1] zcheck_space = z3.Or([cz == z3.StringVal(char) for char in chars]) vcheck_space = any(cv == char for char in chars) if zbool(self.context, zcheck_space, vcheck_space): return zstr(self.context, z3.SubString(self.z, 0, last_idx.z), self.v[0:-1]).rstrip(chars) else: return self def tstr7(s): if s.rstrip(' ') == 'a b': return True else: return False with ConcolicTracer() as _: r = _[tstr7]('a b ') print(r) _.zeval() class zstr(zstr): def lstrip(self, chars=None): if chars is None: chars = string.whitespace if self._len == 0: return self else: first_idx = 0 cz = z3.SubString(self.z, 0, 1) cv = self.v[0] zcheck_space = z3.Or([cz == z3.StringVal(char) for char in chars]) vcheck_space = any(cv == char for char in chars) if zbool(self.context, zcheck_space, vcheck_space): return zstr(self.context, z3.SubString( self.z, 1, self._len.z), self.v[1:]).lstrip(chars) else: return self def tstr8(s): if s.lstrip(' ') == 'a b': return True else: return False with ConcolicTracer() as _: r = _[tstr8](' a b') print(r) _.zeval() class zstr(zstr): def strip(self, chars=None): return self.lstrip(chars).rstrip(chars) ``` Example usage. ``` def tstr9(s): if s.strip() == 'a b': return True else: return False with ConcolicTracer() as _: r = _[tstr9](' a b ') print(r) _.zeval() ``` The `strip()` has generated the right constraints. ##### Splitting Strings We implement string `split()` as follows. ``` class zstr(zstr): def split(self, sep=None, maxsplit=-1): assert sep is not None # default space based split is complicated assert maxsplit == -1 # for now. 
zsep = z3.StringVal(sep) zl = z3.Length(zsep) # zi would be the length of prefix zi = z3.IndexOf(self.z, zsep, z3.IntVal(0)) # Z3Bug: There is a bug in the `z3.IndexOf` method which returns # `z3.SeqRef` instead of `z3.ArithRef`. So we need to fix it. zi = z3.ArithRef(zi.ast, zi.ctx) vi = self.v.find(sep) if zbool(self.context, zi >= z3.IntVal(0), vi >= 0): zprefix = z3.SubString(self.z, z3.IntVal(0), zi) zmid = z3.SubString(self.z, zi, zl) zsuffix = z3.SubString(self.z, zi + zl, z3.Length(self.z)) return [zstr(self.context, zprefix, self.v[0:vi])] + zstr( self.context, zsuffix, self.v[vi + len(sep):]).split( sep, maxsplit) else: return [self] def tstr10(s): if s.split(',') == ['a', 'b', 'c']: return True else: return False with ConcolicTracer() as _: r = _[tstr10]('a,b,c') print(r) _.zeval() ``` ##### Trip Wire For easier debugging, we abort any calls to methods in `str` that are not overridden by `zstr`. ``` def make_str_abort_wrapper(fun): def proxy(*args, **kwargs): raise Exception('%s Not implemented in `zstr`' % fun.__name__) return proxy def init_concolic_3(): strmembers = inspect.getmembers(zstr, callable) zstrmembers = {m[0] for m in strmembers if len( m) == 2 and 'zstr' in m[1].__qualname__} for name, fn in inspect.getmembers(str, callable): # Omitted 'splitlines' as this is needed for formatting output in # IPython/Jupyter if name not in zstrmembers and name not in [ 'splitlines', '__class__', '__contains__', '__delattr__', '__dir__', '__format__', '__ge__', '__getattribute__', '__getnewargs__', '__gt__', '__hash__', '__le__', '__len__', '__lt__', '__mod__', '__mul__', '__ne__', '__reduce__', '__reduce_ex__', '__repr__', '__rmod__', '__rmul__', '__setattr__', '__sizeof__', '__str__']: setattr(zstr, name, make_str_abort_wrapper(fn)) INITIALIZER_LIST.append(init_concolic_3) init_concolic_3() ``` ## Examples ### Triangle We previously showed how to run `triangle()` under `ConcolicTracer`. 
``` with ConcolicTracer() as _: print(_[triangle](1, 2, 3)) ``` The predicates are as follows: ``` _.path _.zeval() ``` We can modify the predicates if necessary. First, we retrieve the symbolic variables. ``` za, zb, zc = [z3.Int(s) for s in _.context[0].keys()] ``` Then, we pass a modified predicate to `zeval()`. The key determines which predicate the new predicate will replace. ``` _.zeval({1: zb == zc}) triangle(1, 0, 1) ``` The updated predicate returns `isosceles` as expected. ### Round Here is a function that rounds its argument up to the next multiple of ten ``` def round10(r): while r % 10 != 0: r += 1 return r ``` As before, we execute the function under the `ConcolicTracer` context. ``` with ConcolicTracer() as _: r = _[round10](1) ``` We verify that we were able to capture all the predicates ``` _.context ``` We use `zeval()` to obtain results. ``` _.zeval() ``` ### Absolute Maximum Do our concolic proxies work across functions? Say we have a function `abs_value()` as below. ``` def abs_value(a): if a > 0: return a else: return -a ``` It is called by another function `abs_max()` ``` def abs_max(a, b): a1 = abs_value(a) b1 = abs_value(b) if a1 > b1: c = a1 else: c = b1 return c ``` Using the `ConcolicTracer()` context on `abs_max()`. ``` with ConcolicTracer() as _: _[abs_max](2, 1) ``` As expected, we have the predicates across functions. ``` _.context _.zeval() ``` Solving the predicates works as expected. Using negative numbers as arguments so that a different branch is taken in `abs_value()` ``` with ConcolicTracer() as _: _[abs_max](-2, -1) _.context _.zeval() ``` The solution reflects our predicates. (We used `a > 0` in `abs_value()`). ### Binomial Coefficient For a larger example that uses different kinds of variables, say we want to compute the binomial coefficient by the following formulas $$ ^nP_k=\frac{n!}{(n-k)!} $$ $$ \binom nk=\,^nC_k=\frac{^nP_k}{k!} $$ we define the functions as follows.
``` def factorial(n): v = 1 while n != 0: v *= n n -= 1 return v def permutation(n, k): return factorial(n) / factorial(n - k) def combination(n, k): return permutation(n, k) / factorial(k) def binomial(n, k): if n < 0 or k < 0 or n < k: raise Exception('Invalid values') return combination(n, k) ``` As before, we run the function under `ConcolicTracer`. ``` with ConcolicTracer() as _: v = _[binomial](4, 2) ``` Then call `zeval()` to evaluate. ``` _.zeval() ``` ### Database For a larger example using the Concolic String class `zstr`, We use the DB class from the [chapter on information flow](InformationFlow.ipynb). ``` from InformationFlow import DB, sample_db, update_inventory ``` We first populate our database. ``` from GrammarMiner import VEHICLES # minor dependency db = sample_db() for V in VEHICLES: update_inventory(db, V) db.db ``` We are now ready to fuzz our `DB` class. Hash functions are difficult to handle directly (because they rely on internal C functions). Hence we modify `table()` slightly. ``` class ConcolicDB(DB): def table(self, t_name): for k, v in self.db: if t_name == k: return v raise SQLException('Table (%s) was not found' % repr(t_name)) def column(self, decl, c_name): for k in decl: if c_name == k: return decl[k] raise SQLException('Column (%s) was not found' % repr(c_name)) ``` To make it easy, we define a single function `db_select()` that directly invokes `db.sql()`. ``` def db_select(s): my_db = ConcolicDB() my_db.db = [(k, v) for (k, v) in db.db.items()] r = my_db.sql(s) return r ``` We now want to run SQL statements under our `ConcolicTracer`, and collect predicates obtained. ``` with ConcolicTracer() as _: _[db_select]('select kind from inventory') ``` The predicates encountered during the execution are as follows: ``` _.path ``` We can use `zeval()` as before to solve the constraints. 
``` _.zeval() ``` ## Fuzzing with Constraints In this section, we show how to use the infrastructure we built for concolic execution for guiding fuzzing. ### SimpleConcolicFuzzer The `SimpleConcolicFuzzer` starts with a sample input generated by some other fuzzer. It then runs the function being tested under `ConcolicTracer`, and collects the path predicates. It then negates random predicates within the path and solves it with *z3* to produce a new output that is guaranteed to take a different path than the original. First, we import the `Fuzzer` interface, and an example program `hang_if_no_space()` ``` from Fuzzer import Fuzzer, hang_if_no_space from ExpectError import ExpectTimeout, ExpectError import random ``` To make the fuzzer work, we need a way to represent decisions made during trace. We keep this in a binary tree where each node represents a decision made, and each leaf represents a complete path. A node in the binary tree is represented by the `TraceNode` class. When a new node is added, it represents a decision taken by the parent on some predicate. This predicate is supplied as `smt_val`, which is `True` for this child to be reached. Since the predicate is actually present in the parent node, we also carry a member `smt` which will be updated by the first child to be added. 
``` class TraceNode: def __init__(self, smt_val, parent, info): # This is the smt that lead to this node self._smt_val = z3.simplify(smt_val) if smt_val is not None else None # This is the predicate that this node might perform at a future point self.smt = None self.info = info self.parent = parent self.children = {} self.path = None self.tree = None self._pattern = None self.log = True def no(self): return self.children.get(self.tree.no_bit) def yes(self): return self.children.get(self.tree.yes_bit) def get_children(self): return (self.no(), self.yes()) def __str__(self): return 'TraceNode[%s]' % ','.join(self.children.keys()) ``` We add a `PlausibleChild` class to track the leaf nodes. ``` class PlausibleChild: def __init__(self, parent, cond, tree): self.parent = parent self.cond = cond self.tree = tree self._smt_val = None def __repr__(self): return 'PlausibleChild[%s]' % (self.parent.pattern() + ':' + self.cond) ``` When the leaf nodes are used to generate new paths, we expect its sibling `TraceNode` to have been already explored. Hence, we make use of the sibling's values for context `cc`, and the `smt_val` from the parent. ``` class PlausibleChild(PlausibleChild): def smt_val(self): if self._smt_val is not None: return self._smt_val # if the parent has other children, then that child would have updatd the parent's smt # Hence, we can use that child's smt_value's opposite as our value. assert self.parent.smt is not None if self.cond == self.tree.no_bit: self._smt_val = z3.Not(self.parent.smt) else: self._smt_val = self.parent.smt return self._smt_val def cc(self): if self.parent.info.get('cc') is not None: return self.parent.info['cc'] # if there is a plausible child node, it means that there can # be at most one child. 
sibilings = list(self.parent.children.values()) assert len(sibilings) == 1 # We expect at the other child to have cc return sibilings[0].info['cc'] ``` The `PlausibleChild` instance is used to generate new paths to explore using `path_expression()`. ``` class PlausibleChild(PlausibleChild): def path_expression(self): path_to_root = self.parent.get_path_to_root() assert path_to_root[0]._smt_val is None return [i._smt_val for i in path_to_root[1:]] + [self.smt_val()] ``` The `TraceTree` class helps us keep track of the binary tree. In the beginning, the root is a sentinel `TraceNode` instance, and simply have two plausible children as leaves. As soon as the first trace is added, one of the plausible children will become a true child. ``` class TraceTree: def __init__(self): self.root = TraceNode(smt_val=None, parent=None, info={'num': 0}) self.root.tree = self self.leaves = {} self.no_bit, self.yes_bit = '0', '1' pprefix = ':' for bit in [self.no_bit, self.yes_bit]: self.leaves[pprefix + bit] = PlausibleChild(self.root, bit, self) self.completed_paths = {} ``` The `add_trace()` method of the `TraceTree` provides a way for new traces to be added. It is kept separate from the initialization as we might want to add more than one trace from the same function. ``` class TraceTree(TraceTree): def add_trace(self, tracer, string): last = self.root i = 0 for i, elt in enumerate(tracer.path): last = last.add_child(elt=elt, i=i + 1, cc=tracer, string=string) last.add_child(elt=z3.BoolVal(True), i=i + 1, cc=tracer, string=string) ``` To make `add_trace()` work, we need a little more infrastructure, that we define below. The `bit()` method translates a predicate to a bit that corresponds to the decision taken at each predicate. If the `if` branch is taken, the result is `1`, while `else` branch is indicated by `0`. The pattern indicates the bit-pattern of decisions required to reach the leaf from the root. 
``` class TraceNode(TraceNode): def bit(self): if self._smt_val is None: return None return self.tree.no_bit if self._smt_val.decl( ).name() == 'not' else self.tree.yes_bit def pattern(self): if self._pattern is not None: return self._pattern path = self.get_path_to_root() assert path[0]._smt_val is None assert path[0].parent is None self._pattern = ''.join([p.bit() for p in path[1:]]) return self._pattern ``` Each node knows how to add a new child, and get the path to root, which is cached. When we add a child to the root node, it means that there was a decision in the current node, and the child is the result of the decision. Hence, to get the decision being made, we simplify the `smt` expression, and check if it starts with `not`. If it does not start with a `not`, we interpret that as the current decision in the node. If it starts with `not`, then we interpret that `not(smt)` was the expression being evaluated in the current node. We know the first decision made only after going through the program at least once. As soon as the program is traversed, we update the parent with the decision that resulted in the current child. ``` class TraceNode(TraceNode): def add_child(self, elt, i, cc, string): if elt == z3.BoolVal(True): # No more exploration here. Simply unregister the leaves of *this* # node and possibly register them in completed nodes, and exit for bit in [self.tree.no_bit, self.tree.yes_bit]: child_leaf = self.pattern() + ':' + bit if child_leaf in self.tree.leaves: del self.tree.leaves[child_leaf] self.tree.completed_paths[self.pattern()] = self return None child_node = TraceNode(smt_val=elt, parent=self, info={'num': i, 'cc': cc, 'string': string}) child_node.tree = self.tree # bit represents the path that child took from this node. 
bit = child_node.bit() # first we update our smt decision if bit == self.tree.yes_bit: # yes, which means the smt can be used as is if self.smt is not None: assert self.smt == child_node._smt_val else: self.smt = child_node._smt_val # no, which means we have to negate it to get the decision. elif bit == self.tree.no_bit: smt_ = z3.simplify(z3.Not(child_node._smt_val)) if self.smt is not None: assert smt_ == self.smt else: self.smt = smt_ else: assert False if bit in self.children: # if self.log: #print(elt, child_node.bit(), i, string) #print(i,'overwriting', bit,'=>',self.children[bit],'with',child_node) child_node = self.children[bit] #self.children[bit] = child_node #child_node.children = old.children else: self.children[bit] = child_node # At this point, we have to unregister any leaves that correspond to this child from tree, # and add the plausible children of this child as leaves to be explored. Note that # if it is the end (z3.True), we do not have any more children. child_leaf = self.pattern() + ':' + bit if child_leaf in self.tree.leaves: del self.tree.leaves[child_leaf] pprefix = child_node.pattern() + ':' # Plausible children. for bit in [self.tree.no_bit, self.tree.yes_bit]: self.tree.leaves[pprefix + bit] = PlausibleChild(child_node, bit, self.tree) return child_node ``` The path to root from any node is computed once and cached. ``` class TraceNode(TraceNode): def get_path_to_root(self): if self.path is not None: return self.path parent_path = [] if self.parent is not None: parent_path = self.parent.get_path_to_root() self.path = parent_path + [self] return self.path ``` The `SimpleConcolicFuzzer` is defined with the `Fuzzer` interface. ``` class SimpleConcolicFuzzer(Fuzzer): def __init__(self): self.ct = TraceTree() self.max_tries = 1000 self.last = None self.last_idx = None ``` The `add_trace()` method we defined earlier is used as follows. First, we use a random string to generate the concolic trace. 
``` with ExpectTimeout(2): with ConcolicTracer() as _: _[hang_if_no_space]('ab d') ``` Next, we initialize and add this trace to the fuzzer. ``` _.path scf = SimpleConcolicFuzzer() scf.ct.add_trace(_, 'ab d') ``` The path we added above can be obtained from the `TraceTree` as below. ``` [i._smt_val for i in scf.ct.root.get_children()[0].get_children()[ 0].get_children()[1].get_path_to_root()] ``` Below are the registered leaves that we can explore at this moment. ``` for key in scf.ct.leaves: print(key, '\t', scf.ct.leaves[key]) ``` Next, we need a way to visualize the constructed tree. ``` from GrammarFuzzer import display_tree TREE_NODES = {} def my_extract_node(tnode, id): key, node, parent = tnode if node is None: # return '? (%s:%s)' % (parent.pattern(), key) , [], '' return '?', [], '' if node.smt is None: return '* %s' % node.info.get('string', ''), [], '' no, yes = node.get_children() num = str(node.info.get('num')) children = [('0', no, node), ('1', yes, node)] TREE_NODES[id] = 0 return "(%s) %s" % (num, str(node.smt)), children, '' def my_edge_attr(dot, start_node, stop_node): # the edges are always drawn '0:NO' first. if TREE_NODES[start_node] == 0: color, label = 'red', '0' TREE_NODES[start_node] = 1 else: color, label = 'blue', '1' TREE_NODES[start_node] = 2 dot.edge(repr(start_node), repr(stop_node), color=color, label=label) def display_trace_tree(root): TREE_NODES.clear() return display_tree( ('', root, None), extract_node=my_extract_node, edge_attr=my_edge_attr) display_trace_tree(scf.ct.root) ``` For example, the pattern `00:0` corresponds to the following predicates. ``` scf.ct.leaves['00:0'] scf.ct.leaves['00:0'].path_expression() ``` Similarly the pattern `:1` corresponds to the following predicates. ``` scf.ct.leaves[':1'] scf.ct.leaves[':1'].path_expression() ``` We can now generate the next input to be generated by looking for the a leaf that is incompletely explored. The idea is to collect all leaf nodes, and choose one at random. 
``` class SimpleConcolicFuzzer(SimpleConcolicFuzzer): def add_trace(self, trace, s): self.ct.add_trace(trace, s) def next_choice(self): #lst = sorted(list(self.ct.leaves.keys()), key=len) c = random.choice(list(self.ct.leaves.keys())) #c = lst[0] return self.ct.leaves[c] ``` We use the `next_choice()` as follows. ``` scf = SimpleConcolicFuzzer() scf.add_trace(_, 'ab d') node = scf.next_choice() node node.path_expression() ``` We get the next choice for exploration, and expand the path expression, and return it together with a context using `get_newpath()` ``` class SimpleConcolicFuzzer(SimpleConcolicFuzzer): def get_newpath(self): node = self.next_choice() path = node.path_expression() return path, node.cc() scf = SimpleConcolicFuzzer() scf.add_trace(_, 'abcd') path, cc = scf.get_newpath() path ``` #### Fuzz The `fuzz()` method simply generates new lists of predicates, and solves them to produce new inputs. ``` class SimpleConcolicFuzzer(SimpleConcolicFuzzer): def fuzz(self): if self.ct.root.children == {}: # a random value to generate comparisons. This would be # the initial value around which we explore with concolic # fuzzing. return ' ' for i in range(self.max_tries): path, last = self.get_newpath() s, v = zeval_smt(path, last, log=False) if s != 'sat': #raise Exception("Unexpected UNSAT") continue val = list(v.values())[0] elt, typ = val if len(elt) == 2 and elt[0] == '-': # negative numbers are [-, x] elt = '-%s' % elt[1] # make sure that we do not retry the tried paths # The tracer we add here is incomplete. This gets updated when # the add_trace is called from the concolic fuzzer context. # self.add_trace(ConcolicTracer((last.decls, path)), elt) if typ == 'Int': return int(elt) elif typ == 'String': return elt return elt return None ``` We now fuzz. ``` scf = SimpleConcolicFuzzer() scf.fuzz() ``` Here is an example program `cgi_decode()`. 
Note that we will not be able to use the `cgi_decode()` from the `Coverage` chapter directly as the hash lookups in `hex_values` can not be used for transferring constraints yet. ``` def cgi_decode(s): """Decode the CGI-encoded string `s`: * replace "+" by " " * replace "%xx" by the character with hex number xx. Return the decoded string. Raise `ValueError` for invalid inputs.""" # Mapping of hex digits to their integer values hex_values = { '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, } t = '' i = 0 while i < len(s): c = s[i] if c == '+': t += ' ' elif c == '%': digit_high, digit_low = s[i + 1], s[i + 2] i = i + 2 found = 0 v = 0 for key in hex_values: if key == digit_high: found = found + 1 v = hex_values[key] * 16 break for key in hex_values: if key == digit_low: found = found + 1 v = v + hex_values[key] break if found == 2: if v >= 128: # z3.StringVal(urllib.parse.unquote('%80')) <-- bug in z3 raise ValueError("Invalid encoding") t = t + chr(v) else: raise ValueError("Invalid encoding") else: t = t + c i = i + 1 return t with ConcolicTracer() as _: _[cgi_decode]('a+c') scf = SimpleConcolicFuzzer() scf.add_trace(_, 'a+c') display_trace_tree(scf.ct.root) ``` So, we fuzz to get a new path. ``` v = scf.fuzz() v ``` We can now obtain the new trace as before. ``` with ExpectError(): with ConcolicTracer() as _: _[cgi_decode](v) ``` The new trace is added to our fuzzer using `add_trace()` ``` scf.add_trace(_, v) ``` The updated binary tree is as follows. Note the difference between the child nodes of `Root` node. 
``` display_trace_tree(scf.ct.root) ``` A complete fuzzer run is as follows ``` scf = SimpleConcolicFuzzer() for i in range(10): v = scf.fuzz() print(repr(v)) if v is None: continue with ConcolicTracer() as _: with ExpectError(): # z3.StringVal(urllib.parse.unquote('%80')) <-- bug in z3 _[cgi_decode](v) scf.add_trace(_, v) display_trace_tree(scf.ct.root) ``` **Note.** Our concolic tracer is limited in that it does not track changes in the string length. This leads it to treat every string with same prefix as the same string. The `SimpleConcolicFuzzer` is reasonably efficient at exploring paths near the path followed by a given sample input. However, it is not very intelligent when it comes to choosing which paths to follow. We look at another fuzzer that lifts the predicates obtained to the grammar and achieves better fuzzing. ### ConcolicGrammarFuzzer The concolic framework can be used directly in fuzzing. First, we extend our `GrammarFuzzer` with a helper method `tree_to_string()` such that we can retrieve the derivation tree of the fuzz output. We also define `prune_tree()` and `coalesce()` to reduce the depth of sub trees. These methods accept a list of tokens types such that a node belonging to the token type gets converted from a tree to a leaf node by calling `tree_to_string()`. 
``` from InformationFlow import INVENTORY_GRAMMAR, SQLException from GrammarFuzzer import GrammarFuzzer class ConcolicGrammarFuzzer(GrammarFuzzer): def tree_to_string(self, tree): symbol, children, *_ = tree e = '' if children: return e.join([self.tree_to_string(c) for c in children]) else: return e if symbol in self.grammar else symbol def prune_tree(self, tree, tokens): name, children = tree children = self.coalesce(children) if name in tokens: return (name, [(self.tree_to_string(tree), [])]) else: return (name, [self.prune_tree(c, tokens) for c in children]) def coalesce(self, children): last = '' new_lst = [] for cn, cc in children: if cn not in self.grammar: last += cn else: if last: new_lst.append((last, [])) last = '' new_lst.append((cn, cc)) if last: new_lst.append((last, [])) return new_lst ``` We can now use the fuzzer to produce inputs for our DB. ``` tgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR) while True: qtree = tgf.fuzz_tree() query = str(tgf.tree_to_string(qtree)) if query.startswith('select'): break from ExpectError import ExpectError with ExpectError(): print(repr(query)) with ConcolicTracer() as _: res = _[db_select](str(query)) print(repr(res)) ``` Our fuzzer returns with an exception. It is unable to find the specified table. Let us examine the predicates it encountered. ``` for i, p in enumerate(_.path): print(i, p) ``` Note that we can obtain constraints that are not present in the grammar from using the `ConcolicTracer`. In particular, see how we are able to obtain the condition that the table needs to be `inventory` (Predicate 11) for the fuzzing to succeed. How do we lift these to the grammar? and in particular how do we do it automatically? One option we have is to simply switch the last predicate obtained. In our case, the last predicate is (11). Can we simply invert the predicate and solve it again? 
``` new_path = _.path[0:-1] + [z3.Not(_.path[-1])] new_ = ConcolicTracer((_.decls, new_path)) new_.fn = _.fn new_.fn_args = _.fn_args new_.zeval() ``` Indeed, this will not work as the string lengths being compared to are different. ``` print(_.path[-1]) z3.solve(z3.Not(_.path[-1])) ``` A better idea is to investigate what _string_ comparisons are being made, and associate that with the corresponding nodes in the grammar. Let us examine our derivation tree (pruned to avoid recursive structures, and to focus on important parts). ``` from GrammarFuzzer import display_tree prune_tokens = [ '<value>', '<table>', '<column>', '<literals>', '<exprs>', '<bexpr>' ] dt = tgf.prune_tree(qtree, prune_tokens) display_tree(dt) ``` Can we identify which part of the input was supplied by which part of the grammar? We define `span()` that can recover this information from the derivation tree. For a given node, let us assume that the start point is known. Then, for processing the children, we proceed as follows: We choose one child at a time from left to right, and compute the length of the child. The length of the children before the current child in addition to our starting point gives the starting point of the current child. The end point for each node is simply the end point of its last children (or the length of its node if it is a leaf). ``` from GrammarFuzzer import START_SYMBOL def span(node, g, node_start=0): hm = {} k, cs = node end_i = node_start new_cs = [] for c in cs: chm, (ck, child_start, child_end, gcs) = span(c, g, end_i) new_cs.append((ck, child_start, child_end, gcs)) end_i = child_end hm.update(chm) node_end = end_i if cs else node_start + len(k) if k in g and k != START_SYMBOL: hm[k] = (node_start, node_end - node_start) return hm, (k, node_start, node_end, new_cs) ``` We use it as follows: ``` span_hm, _n = span(dt, INVENTORY_GRAMMAR) span_hm ``` We can check if we got the right values as follows. 
``` print("query:", query) for k in span_hm: start, l = span_hm[k] print(k, query[start:start + l]) ``` Next, we need to obtain all the comparisons made in each predicate. For that, we define two helper functions. The first is `unwrap_substrings()` that translates multiple calls to `z3.SubString` and returns the start, and length of the given z3 string expression. ``` def unwrap_substrings(s): assert s.decl().name() == 'str.substr' cs, frm, l = s.children() fl = frm.as_long() ll = l.as_long() if cs.decl().name() == 'str.substr': newfrm, _l = unwrap_substrings(cs) return (fl + newfrm, ll) else: return (fl, ll) ``` We define `traverse_z3()` that traverses a given z3 string expression, and collects all direct string comparisons to a substring of the original argument. ``` def traverse_z3(p, hm): def z3_as_string(v): return v.as_string() n = p.decl().name() if n == 'not': return traverse_z3(p.children()[0], hm) elif n == '=': i, j = p.children() if isinstance(i, (int, z3.IntNumRef)): return traverse_z3(j, hm) elif isinstance(j, (int, z3.IntNumRef)): return traverse_z3(i, hm) else: if i.is_string() and j.is_string(): if i.is_string_value(): cs, frm, l = j.children() if (isinstance(frm, z3.IntNumRef) and isinstance(l, z3.IntNumRef)): hm[z3_as_string(i)] = unwrap_substrings(j) elif j.is_string_value(): cs, frm, l = i.children() if (isinstance(frm, z3.IntNumRef) and isinstance(l, z3.IntNumRef)): hm[z3_as_string(j)] = unwrap_substrings(i) else: assert False # for now elif n == '<' or n == '>': i, j = p.children() if isinstance(i, (int, z3.IntNumRef)): return traverse_z3(j, hm) elif isinstance(j, (int, z3.IntNumRef)): return traverse_z3(i, hm) else: assert False return p comparisons = {} for p in _.path: traverse_z3(p, comparisons) comparisons ``` All that we need now is to declare string variables that match the substrings in `comparisons`, and solve for them for each item in the path. For that, we define `find_alternatives()`. 
``` def find_alternatives(spans, cmp): alts = {} for key in spans: start, l = spans[key] rset = set(range(start, start + l)) for ckey in cmp: cstart, cl = cmp[ckey] cset = set(range(cstart, cstart + cl)) # if rset.issubset(cset): <- ignoring subsets for now. if rset == cset: if key not in alts: alts[key] = set() alts[key].add(ckey) return alts ``` We use it as follows. ``` alternatives = find_alternatives(span_hm, comparisons) alternatives ``` So, we have our alternatives for each key in the grammar. We can now update our grammar as follows. ``` INVENTORY_GRAMMAR_NEW = dict(INVENTORY_GRAMMAR) for k in alternatives: INVENTORY_GRAMMAR_NEW[k] = INVENTORY_GRAMMAR_NEW[k] + list(alternatives[k]) ``` We made a choice here. We could have completely overwritten the definition of `<table>` . Instead, we added our new alternatives to the existing definition. This way, our fuzzer will also attempt other values for `<table>` once in a while. ``` INVENTORY_GRAMMAR_NEW['<table>'] ``` Let us try fuzzing with our new grammar. ``` cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR_NEW) for i in range(10): qtree = cgf.fuzz_tree() query = cgf.tree_to_string(qtree) print(query) with ExpectError(): try: with ConcolicTracer() as _: res = _[db_select](query) print(repr(res)) except SQLException as e: print(e) print() ``` That is, we were able to reach the dangerous method `my_eval()`. In effect, what we have done is to lift parts of predicates to the grammar. The new grammar can generate inputs that reach deeper into the program than before. Note that we have only handled the equality predicate. One can also lift the '<' and '>' comparison operators to the grammar if required. Compare the output of our fuzzer to the original `GrammarFuzzer` below. 
``` gf = GrammarFuzzer(INVENTORY_GRAMMAR) for i in range(10): query = gf.fuzz() print(query) with ExpectError(): try: res = db_select(query) print(repr(res)) except SQLException as e: print(e) print() ``` As can be seen, the original grammar fuzzer is unable to proceed beyond the table verification. #### All together We implement these methods in `ConcolicGrammarFuzzer`. The method `update_grammar()` allows `ConcolicGrammarFuzzer` to collect feedback from concolic fuzzing, and update the grammar used for fuzzing accordingly. ``` class ConcolicGrammarFuzzer(ConcolicGrammarFuzzer): def prune_tokens(self, tokens): self.prune_tokens = tokens def update_grammar(self, trace): self.comparisons = {} for p in trace.path: traverse_z3(p, self.comparisons) alternatives = find_alternatives(self.span_range, self.comparisons) if self.log: print('Alternatives:', alternatives, 'Span:', self.span_range) new_grammar = dict(self.grammar) for k in alternatives: new_grammar[k] = list(set(new_grammar[k] + list(alternatives[k]))) self.grammar = new_grammar ``` The `fuzz()` method simply generates the derivation tree, computes the span range, and returns the string generated from the derivation tree. ``` class ConcolicGrammarFuzzer(ConcolicGrammarFuzzer): def fuzz(self): qtree = self.fuzz_tree() self.pruned_tree = self.prune_tree(qtree, self.prune_tokens) query = self.tree_to_string(qtree) self.span_range, _n = span(self.pruned_tree, self.grammar) return query ``` To ensure that our approach works, let us update our tables slightly. ``` inventory = db.db.pop('inventory', None) db.db['vehicles'] = inventory db.db['months'] = ({ 'month': int, 'name': str }, [{ 'month': i + 1, 'name': m } for i, m in enumerate([ 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec' ])]) db.db ``` The `ConcolicGrammarFuzzer` is used as follows. 
``` cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR) cgf.prune_tokens(prune_tokens) for i in range(10): query = cgf.fuzz() print(query) with ConcolicTracer() as _: with ExpectError(): try: res = _[db_select](query) print(repr(res)) except SQLException as e: print(e) cgf.update_grammar(_) print() ``` As can be seen, the fuzzer starts with no knowledge of the tables `vehicles`, `months` and `years`, but identifies it from the concolic execution, and lifts it to the grammar. This allows us to improve the effectiveness of fuzzing. ## Limitations As with dynamic taint analysis, implicit control flow can obscure the predicates encountered during concolic execution. However, this limitation could be overcome to some extent by wrapping any constants in the source with their respective proxy objects. Similarly, calls to internal C functions can cause the symbolic information to be discarded, and only partial information may be obtained. ## Synopsis This chapter defines two main classes: `SimpleConcolicFuzzer` and `ConcolicGrammarFuzzer`. The `SimpleConcolicFuzzer` first uses a sample input to collect predicates encountered. The fuzzer then negates random predicates to generate new input constraints. These, when solved, produce inputs that explore paths that are close to the original path. It can be used as follows. We first obtain the constraints using `ConcolicTracer`. ``` with ConcolicTracer() as _: _[cgi_decode]('a%20d') ``` These constraints are added to the concolic fuzzer as follows: ``` scf = SimpleConcolicFuzzer() scf.add_trace(_, 'a%20d') ``` The concolic fuzzer then uses the constraints added to guide its fuzzing as follows: ``` scf = SimpleConcolicFuzzer() for i in range(10): v = scf.fuzz() if v is None: break print(repr(v)) with ExpectError(): with ConcolicTracer() as _: _[cgi_decode](v) scf.add_trace(_, v) ``` The `SimpleConcolicFuzzer` simply explores all paths near the original path traversed by the sample input. 
It uses a simple mechanism to explore the paths that are near the paths that it knows about, and other than code paths, knows nothing about the input. The `ConcolicGrammarFuzzer` on the other hand, knows about the input grammar, and can collect feedback from the subject under fuzzing. It can lift some of the constraints encountered to the grammar, enabling deeper fuzzing. It is used as follows: ``` from InformationFlow import INVENTORY_GRAMMAR, SQLException cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR) cgf.prune_tokens(prune_tokens) for i in range(10): query = cgf.fuzz() print(query) with ConcolicTracer() as _: with ExpectError(): try: res = _[db_select](query) print(repr(res)) except SQLException as e: print(e) cgf.update_grammar(_) print() ``` ## Lessons Learned * Concolic execution can often provide more information than taint analysis with respect to the program behavior. However, this comes at a much larger runtime cost. Hence, unlike taint analysis, real-time analysis is often not possible. * Similar to taint analysis, concolic execution also suffers from limitations such as indirect control flow and internal function calls. * Predicates from concolic execution can be used in conjunction with fuzzing to provide an even more robust indication of incorrect behavior than taints, and can be used to create grammars that are better at producing valid inputs. ## Next Steps A costlier but stronger alternative to concolic fuzzing is [symbolic fuzzing](SymbolicFuzzer.ipynb). Similarly, [search based fuzzing](SearchBasedFuzzer.ipynb) can often provide a cheaper exploration strategy than relying on SMT solvers to provide inputs slightly different from the current path. ## Background The technique of concolic execution was originally used to inform and expand the scope of _symbolic execution_ \cite{king1976symbolic}, a static analysis technique for program analysis. Larson et al. \cite{Larson2003} were the first to use the concolic execution technique.
The idea of using proxy objects for collecting constraints was pioneered by Cadar et al. \cite{cadar2005execution}. The concolic execution technique for Python programs used in this chapter was pioneered by PeerCheck \cite{PeerCheck}, and Python Error Finder \cite{Barsotti2018}. ## Exercises ### Exercise 1: Implement a Concolic Float Proxy Class While implementing the `zint` binary operators, we asserted that the results were `int`. However, that need not be the case. For example, division can result in `float`. Hence, we need proxy objects for `float`. Can you implement a similar proxy object for `float` and fix the `zint` binary operator definition? __Solution.__ The solution is as follows. As in the case of `zint`, we first open up `zfloat` for extension. ``` class zfloat(float): def __new__(cls, context, zn, v, *args, **kw): return float.__new__(cls, v, *args, **kw) ``` We then implement the initialization methods. ``` class zfloat(zfloat): @classmethod def create(cls, context, zn, v=None): return zproxy_create(cls, 'Real', z3.Real, context, zn, v) def __init__(self, context, z, v=None): self.z, self.v = z, v self.context = context ``` The helper for when one of the arguments in a binary operation is not `float`. ``` class zfloat(zfloat): def _zv(self, o): return (o.z, o.v) if isinstance(o, zfloat) else (z3.RealVal(o), o) ``` Coerce `float` into bool value for use in conditionals. ``` class zfloat(zfloat): def __bool__(self): # force registering boolean condition if self != 0.0: return True return False ``` Define the common proxy method for comparison methods ``` def make_float_bool_wrapper(fname, fun, zfun): def proxy(self, other): z, v = self._zv(other) z_ = zfun(self.z, z) v_ = fun(self.v, v) return zbool(self.context, z_, v_) return proxy ``` We apply the comparison methods on the defined `zfloat` class.
``` FLOAT_BOOL_OPS = [ '__eq__', # '__req__', '__ne__', # '__rne__', '__gt__', '__lt__', '__le__', '__ge__', ] for fname in FLOAT_BOOL_OPS: fun = getattr(float, fname) zfun = getattr(z3.ArithRef, fname) setattr(zfloat, fname, make_float_bool_wrapper(fname, fun, zfun)) ``` Similarly, we define the common proxy method for binary operators. ``` def make_float_binary_wrapper(fname, fun, zfun): def proxy(self, other): z, v = self._zv(other) z_ = zfun(self.z, z) v_ = fun(self.v, v) return zfloat(self.context, z_, v_) return proxy ``` And apply them on `zfloat` ``` FLOAT_BINARY_OPS = [ '__add__', '__sub__', '__mul__', '__truediv__', # '__div__', '__mod__', # '__divmod__', '__pow__', # '__lshift__', # '__rshift__', # '__and__', # '__xor__', # '__or__', '__radd__', '__rsub__', '__rmul__', '__rtruediv__', # '__rdiv__', '__rmod__', # '__rdivmod__', '__rpow__', # '__rlshift__', # '__rrshift__', # '__rand__', # '__rxor__', # '__ror__', ] for fname in FLOAT_BINARY_OPS: fun = getattr(float, fname) zfun = getattr(z3.ArithRef, fname) setattr(zfloat, fname, make_float_binary_wrapper(fname, fun, zfun)) ``` These are used as follows. ``` with ConcolicTracer() as _: za = zfloat.create(_.context, 'float_a', 1.0) zb = zfloat.create(_.context, 'float_b', 0.0) if za * zb: print(1) _.context ``` Finally, we fix the `zint` binary wrapper to correctly create `zfloat` when needed. ``` def make_int_binary_wrapper(fname, fun, zfun): def proxy(self, other): z, v = self._zv(other) z_ = zfun(self.z, z) v_ = fun(self.v, v) if isinstance(v_, float): return zfloat(self.context, z_, v_) elif isinstance(v_, int): return zint(self.context, z_, v_) else: assert False return proxy for fname in INT_BINARY_OPS: fun = getattr(int, fname) zfun = getattr(z3.ArithRef, fname) setattr(zint, fname, make_int_binary_wrapper(fname, fun, zfun)) ``` Checking whether it worked as expected. 
``` with ConcolicTracer() as _: v = _[binomial](4, 2) _.zeval() ``` ### Exercise 2: Bit Manipulation Similar to floats, implementing the bit manipulation functions such as `xor` involves converting `int` to its bit vector equivalents, performing operations on them, and converting it back to the original type. Can you implement the bit manipulation operations for `zint`? __Solution.__ The solution is as follows. We first define the proxy method as before. ``` def make_int_bit_wrapper(fname, fun, zfun): def proxy(self, other): z, v = self._zv(other) z_ = z3.BV2Int( zfun( z3.Int2BV( self.z, num_bits=64), z3.Int2BV( z, num_bits=64))) v_ = fun(self.v, v) return zint(self.context, z_, v_) return proxy ``` It is then applied to the `zint` class. ``` BIT_OPS = [ '__lshift__', '__rshift__', '__and__', '__xor__', '__or__', '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__', ] def init_concolic_4(): for fname in BIT_OPS: fun = getattr(int, fname) zfun = getattr(z3.BitVecRef, fname) setattr(zint, fname, make_int_bit_wrapper(fname, fun, zfun)) INITIALIZER_LIST.append(init_concolic_4) init_concolic_4() ``` Invert is the only unary bit manipulation method. ``` class zint(zint): def __invert__(self): return zint(self.context, z3.BV2Int( ~z3.Int2BV(self.z, num_bits=64)), ~self.v) ``` The `my_fn()` computes `xor` and returns `True` if the `xor` results in a non zero value. ``` def my_fn(a, b): o_ = (a | b) a_ = (a & b) if o_ & ~a_: return True else: return False ``` Using that under `ConcolicTracer` ``` with ConcolicTracer() as _: print(_[my_fn](2, 1)) ``` We log the computed SMT expression to verify that everything went well. ``` _.zeval(log=True) ``` We can confirm from the formulas generated that the bit manipulation functions worked correctly. ### Exercise 3: String Translation Functions We have seen how to define `upper()` and `lower()`. Can you define the `capitalize()`, `title()`, and `swapcase()` methods? __Solution.__ Solution not yet available.
github_jupyter
# Approximate q-learning In this notebook you will teach a __pytorch__ neural network to do Q-learning. ``` # in google colab uncomment this import os os.system('apt-get update') os.system('apt-get install -y xvfb') os.system('wget https://raw.githubusercontent.com/yandexdataschool/Practical_DL/fall18/xvfb -O ../xvfb') os.system('apt-get install -y python-opengl ffmpeg') os.system('pip install pyglet==1.5.0') # XVFB will be launched if you run on a server import os if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: !bash ../xvfb start os.environ['DISPLAY'] = ':1' import gym import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline env = gym.make("CartPole-v0").env env.reset() n_actions = env.action_space.n state_dim = env.observation_space.shape plt.imshow(env.render("rgb_array")) env.close() ``` # Approximate Q-learning: building the network To train a neural network policy one must have a neural network policy. Let's build it. Since we're working with a pre-extracted features (cart positions, angles and velocities), we don't need a complicated network yet. In fact, let's build something like this for starters: ![img](https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/yet_another_week/_resource/qlearning_scheme.png) For your first run, please only use linear layers (nn.Linear) and activations. Stuff like batch normalization or dropout may ruin everything if used haphazardly. Also please avoid using nonlinearities like sigmoid & tanh: agent's observations are not normalized so sigmoids may become saturated from init. Ideally you should start small with maybe 1-2 hidden layers with < 200 neurons and then increase network size if agent doesn't beat the target score. ``` import torch import torch.nn as nn import torch.nn.functional as F network = nn.Sequential() network.add_module('layer1', < ... 
>) <YOUR CODE: stack layers!!!1 > # hint: use state_dim[0] as input size def get_action(state, epsilon=0): """ sample actions with epsilon-greedy policy recap: with p = epsilon pick random action, else pick action with highest Q(s,a) """ state = torch.tensor(state[None], dtype=torch.float32) q_values = network(state).detach().numpy() # YOUR CODE return int( < epsilon-greedily selected action > ) s = env.reset() assert tuple(network(torch.tensor([s]*3, dtype=torch.float32)).size()) == ( 3, n_actions), "please make sure your model maps state s -> [Q(s,a0), ..., Q(s, a_last)]" assert isinstance(list(network.modules( ))[-1], nn.Linear), "please make sure you predict q-values without nonlinearity (ignore if you know what you're doing)" assert isinstance(get_action( s), int), "get_action(s) must return int, not %s. try int(action)" % (type(get_action(s))) # test epsilon-greedy exploration for eps in [0., 0.1, 0.5, 1.0]: state_frequencies = np.bincount( [get_action(s, epsilon=eps) for i in range(10000)], minlength=n_actions) best_action = state_frequencies.argmax() assert abs(state_frequencies[best_action] - 10000 * (1 - eps + eps / n_actions)) < 200 for other_action in range(n_actions): if other_action != best_action: assert abs(state_frequencies[other_action] - 10000 * (eps / n_actions)) < 200 print('e=%.1f tests passed' % eps) ``` ### Q-learning via gradient descent We shall now train our agent's Q-function by minimizing the TD loss: $$ L = { 1 \over N} \sum_i (Q_{\theta}(s,a) - [r(s,a) + \gamma \cdot max_{a'} Q_{-}(s', a')]) ^2 $$ Where * $s, a, r, s'$ are current state, action, reward and next state respectively * $\gamma$ is a discount factor defined two cells above. The tricky part is with $Q_{-}(s',a')$. From an engineering standpoint, it's the same as $Q_{\theta}$ - the output of your neural network policy. However, when doing gradient descent, __we won't propagate gradients through it__ to make training more stable (see lectures). 
To do so, we shall use the `x.detach()` function which basically says "consider this thing constant when doing backprop". ``` def compute_td_loss(states, actions, rewards, next_states, is_done, gamma=0.99, check_shapes=False): """ Compute td loss using torch operations only. Use the formula above. """ states = torch.tensor( states, dtype=torch.float32) # shape: [batch_size, state_size] actions = torch.tensor(actions, dtype=torch.long) # shape: [batch_size] rewards = torch.tensor(rewards, dtype=torch.float32) # shape: [batch_size] # shape: [batch_size, state_size] next_states = torch.tensor(next_states, dtype=torch.float32) is_done = torch.tensor(is_done, dtype=torch.uint8) # shape: [batch_size] # get q-values for all actions in current states predicted_qvalues = network(states) # select q-values for chosen actions predicted_qvalues_for_actions = predicted_qvalues[ range(states.shape[0]), actions ] # compute q-values for all actions in next states predicted_next_qvalues = # YOUR CODE # compute V*(next_states) using predicted next q-values next_state_values = # YOUR CODE assert next_state_values.dtype == torch.float32 # compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
target_qvalues_for_actions = # YOUR CODE # at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist target_qvalues_for_actions = torch.where( is_done, rewards, target_qvalues_for_actions) # mean squared error loss to minimize loss = torch.mean((predicted_qvalues_for_actions - target_qvalues_for_actions.detach()) ** 2) if check_shapes: assert predicted_next_qvalues.data.dim( ) == 2, "make sure you predicted q-values for all actions in next state" assert next_state_values.data.dim( ) == 1, "make sure you computed V(s') as maximum over just the actions axis and not all axes" assert target_qvalues_for_actions.data.dim( ) == 1, "there's something wrong with target q-values, they must be a vector" return loss # sanity checks s = env.reset() a = env.action_space.sample() next_s, r, done, _ = env.step(a) loss = compute_td_loss([s], [a], [r], [next_s], [done], check_shapes=True) loss.backward() assert len(loss.size()) == 0, "you must return scalar loss - mean over batch" assert np.any(next(network.parameters()).grad.detach().numpy() != 0), "loss must be differentiable w.r.t. 
network weights" ``` ### Playing the game ``` opt = torch.optim.Adam(network.parameters(), lr=1e-4) epsilon = 0.5 def generate_session(t_max=1000, epsilon=0, train=False): """play env with approximate q-learning agent and train it at the same time""" total_reward = 0 s = env.reset() for t in range(t_max): a = get_action(s, epsilon=epsilon) next_s, r, done, _ = env.step(a) if train: opt.zero_grad() compute_td_loss([s], [a], [r], [next_s], [done]).backward() opt.step() total_reward += r s = next_s if done: break return total_reward for i in range(1000): session_rewards = [generate_session( epsilon=epsilon, train=True) for _ in range(100)] print("epoch #{}\tmean reward = {:.3f}\tepsilon = {:.3f}".format( i, np.mean(session_rewards), epsilon)) epsilon *= 0.99 assert epsilon >= 1e-4, "Make sure epsilon is always nonzero during training" if np.mean(session_rewards) > 300: print("You Win!") break ``` ### How to interpret results Welcome to the f.. world of deep f...n reinforcement learning. Don't expect agent's reward to smoothly go up. Hope for it to go increase eventually. If it deems you worthy. Seriously though, * __ mean reward__ is the average reward per game. For a correct implementation it may stay low for some 10 epochs, then start growing while oscilating insanely and converges by ~50-100 steps depending on the network architecture. * If it never reaches target score by the end of for loop, try increasing the number of hidden neurons or look at the epsilon. * __ epsilon__ - agent's willingness to explore. If you see that agent's already at < 0.01 epsilon before it's is at least 200, just reset it back to 0.1 - 0.5. ### Record videos As usual, we now use `gym.wrappers.Monitor` to record a video of our agent playing the game. Unlike our previous attempts with state binarization, this time we expect our agent to act ~~(or fail)~~ more smoothly since there's no more binarization error at play. 
As you already did with tabular q-learning, we set epsilon=0 for final evaluation to prevent agent from exploring himself to death. ``` # record sessions import gym.wrappers env = gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) sessions = [generate_session(epsilon=0, train=False) for _ in range(100)] env.close() # show video from IPython.display import HTML import os video_names = list( filter(lambda s: s.endswith(".mp4"), os.listdir("./videos/"))) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format("./videos/"+video_names[-1])) # this may or may not be _last_ video. Try other indices ```
github_jupyter
# Lecture 16: Classification # Problem setting ## Review In last few lectures we have learned the linear regression, where we explore the possibility of using a linear function (or higher degree polynomials) to represent the relation of the features in the samples (aka labels, $x$ values, or training data `X_train`) to a target value ($y$ values `y_train`), so that we can predict the target value $y$ (`y_pred` obtained by the model) based on testing data `X_test`. However, linear regression is not appropriate in the case of a qualitative target value. ## Classification Today, we will learn how to predict a discrete label such as * predicting whether a grid of pixel intensities represents a "0" digit or a "1" digit; * predicting whether tomorrow will have rain based on previous days' data. * predicting whether a wine is good or mediocre based on its chemical components' data. This is a classification problem. Logistic regression is a simple classification algorithm for learning to make such decisions for a binary label. Reference: MATLAB tutorial in [Stanford Deep Learning tutorial](http://deeplearning.stanford.edu/tutorial/). # Logistic Regression ---- ## Heuristics Recall the `winequality-red.csv` we have used in the last few lectures and labs. If the `quality` of a wine is $\geq 6$, relabel it as "favorable"; if the `quality` of a wine is $\leq 5$, relabel it as "mediocre". For a certain sample $(\mathbf{x}^{(i)}, y^{(i)})$, where $\mathbf{x}^{(i)}$ is the vector representing its first 11 features, and $y^{(i)}$ is the quality score (label), if we know its score is 7, then $$ P\big(i\text{-th sample is favorable} \big) = 1, \qquad P\big(i\text{-th sample is mediocre} \big) = 0. $$ If we relabel the "favorable" and "mediocre" into 1 and 0 as our values for $y^{(i)}$, then $$ P\big(y^{(i)} = 1\big) = 1, \qquad P\big(y^{(i)} = 0\big) = 0. 
$$ If some other sample, say $j$-th sample, has quality score 4, then $$ P\big(y^{(i)} = 1\big) = 0, \qquad P\big(y^{(i)} = 0\big) = 1. $$ We can use vector $[1,0]$ to represent the first sample's probability in each class, and vector $[0,1]$ to represent that of the second sample. We want to build a model, so that given the first 11 features $\mathbf{x}$ of a certain sample, it can output an estimate, say, $[0.8, 0.2]$ to tell me that $$ P\big(y = 1| \mathbf{x}\big) = 0.8, \qquad P\big(y = 0|\mathbf{x}\big) = 0.2, $$ which is to say, this sample has 0.8 chance in Class 1, 0.2 chance in the Class 0. The predicted label $\hat{y}$ is then: $$ \hat{y} = \operatorname{arg}\max_{j} P\big(y = j| \mathbf{x}\big), $$ i.e., we use the biggest estimated probability's class as this sample's predicted label. # Logistic regression ---- ## Model function (hypothesis) Weights vector $\mathbf{w}$, same shape with a sample's feature vector $\mathbf{x}$, $h(\mathbf{x})$ is our estimate of $ P(y=1|\mathbf{x})$ and $1 - h(\mathbf{x})$ is our estimate of $P(y=0|\mathbf{x}) = 1 - P(y=1|\mathbf{x})$. $$ h(\mathbf{x}) = h(\mathbf{x};\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^\top \mathbf{x})} =: \sigma(\mathbf{w}^\top \mathbf{x}) $$ or more compactly, because $y = 0$ or $1$: $$ P(y|\mathbf{x}) \text{ is estimated by } h(\mathbf{x})^y \big(1 - h(\mathbf{x}) \big)^{1-y}. $$ ---- ## Loss function $$ L (\mathbf{w}; X, \mathbf{y}) = - \frac{1}{N}\sum_{i=1}^N \Bigl\{y^{(i)} \ln\big( h(\mathbf{x}^{(i)}; \mathbf{w}) \big) + (1 - y^{(i)}) \ln\big( 1 - h(\mathbf{x}^{(i)};\mathbf{w}) \big) \Bigr\}. \tag{$\star$} $$ ---- ## Training The gradient of the loss function with respect to the weights $\mathbf{w}$ is: $$ \nabla_{\mathbf{w}} \big( L (\mathbf{w}) \big) =\frac{1}{N}\sum_{i=1}^N \big( h(\mathbf{x}^{(i)};\mathbf{w}) - y^{(i)} \big) \mathbf{x}^{(i)} . \tag{$\dagger$} $$ ``` import numpy as np # model h(X; w) = sigma(-Xw) # w: weights # X: training data # X.shape[0] is no. 
of samples, and X.shape[1] is the no. of features def h(w,X): z = np.matmul(X,w) return 1.0 / (1.0 + np.exp(-z)) # loss function, modulo by N (size of training data), a vectorized implementation without for loop def loss(w,X,y): loss_components = np.log(h(w,X)) * y + (1.0 - y)* np.log(1 - h(w,X)) # above is a dimension (12665,) array return -np.mean(loss_components) # same with loss_components.sum()/N def gradient_loss(w,X,y): gradient_for_all_training_data = (h(w,X) - y).reshape(-1,1)*X # we should return a (n,) array, which is averaging all N training data's gradient return np.mean(gradient_for_all_training_data, axis=0) ``` # Reading 1: Derivation of the logistic regression For binary-valued labels, $y^{(i)} \in \{0,1\}$, we are trying to predict the probability that a given example belongs to the "1" class versus the probability that it belongs to the "0" class. Specifically, we will use the **logistic regression**, which tries to learn a function of the form: $$ h(\mathbf{x}) = h(\mathbf{x};\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^\top \mathbf{x})} =: \sigma(\mathbf{w}^\top \mathbf{x}) $$ or more compactly, because $y = 0$ or $1$: $$ P(y|\mathbf{x}) = h(\mathbf{x})^y \big(1 - h(\mathbf{x}) \big)^{1-y} $$ ---- ## Sigmoid function The function $\sigma(z) = 1/\big(1+\exp(−z)\big)$ is often called the "sigmoid" or "logistic" function, or "logistic/sigmoid" activation function in machine learning. It is an S-shaped function that "squashes" the value of $\mathbf{w}^\top \mathbf{x}$ into the range $[0,1]$ so that we may interpret $h(\mathbf{x})$ as a probability. Our goal is to search for a value of the weights $\mathbf{w}$ so that: > The probability $P(y=1|\mathbf{x})=h(\mathbf{x})$ is large when $x$ belongs to the "1" class, small when $x$ belongs to the "0" class (so that $P(y=0|\mathbf{x})=1- h(\mathbf{x})$ is large). 
---- ## Maximum likelihood For a set of training examples with binary labels $\{(\mathbf{x}^{(i)},y^{(i)}):i=1,\dots,N\}$ the following likelihood estimator measures how well a given model $h(\mathbf{x};\mathbf{w})$ does this separating class job: assuming our training samples are independently Bernoulli distributed, we want to maximize the following quantity $$ {\begin{aligned} &P(\mathbf{y}\; | \; \mathbf{X};\mathbf{w} )\\ =&\prod _{i=1}^N P\left(y^{(i)}\mid \mathbf{x}^{(i)};\mathbf{w}\right)\\ =&\prod_{i=1}^N h\big(\mathbf{x}^{(i)} \big)^{y^{(i)}} \Big(1-h\big(\mathbf{x}^{(i)}\big) \Big)^{\big(1-y^{(i)}\big)} \end{aligned}}. $$ This function is highly nonlinear on the weights $\mathbf{w}$ so we take the log and then average, lastly define our loss function to be minimized as follows: $$ L (\mathbf{w}) = L (\mathbf{w}; X,\mathbf{y}) = - \frac{1}{N}\sum_{i=1}^N \Bigl\{y^{(i)} \ln\big( h(\mathbf{x}^{(i)}) \big) + (1 - y^{(i)}) \ln\big( 1 - h(\mathbf{x}^{(i)}) \big) \Bigr\}. \tag{$\star$} $$ Note that only one of the two terms in the summation is non-zero for each training sample (depending on whether the label $y^{(i)}$ is 0 or 1). When $y^{(i)}=1$ minimizing the loss function means we need to make $h(x^{(i)})$ large, and when $y^{(i)}= 0$ we want to make $1- h(x^{(i)})$ large as explained above. ---- ## Training and cross-validation After the loss function $L (\mathbf{w})$ is set up, the training data is used by the gradient descent to minimize $L (\mathbf{w})$ to find the best choice of weights $\mathbf{w}$. Even though the cost function $(\star)$ looks quite complicated, due to the following special property of the Sigmoid function $$ \frac{d}{dz} \big(\sigma(z)\big) = \frac{d}{dz} \left(\frac{1}{1+\exp(−z)}\right) = \sigma(z)\cdot \big(1-\sigma(z)\big). 
$$ Therefore recalling $h(\mathbf{x}) = \sigma(\mathbf{w}^\top \mathbf{x})$ $$ \begin{aligned} \frac{\partial L (\mathbf{w})}{\partial w_k} & = - \frac{1}{N}\sum_{i=1}^N \Bigg\{y^{(i)} \frac{1}{h(\mathbf{x}^{(i)})} \frac{\partial}{\partial w_k} \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)} \big) + (1 - y^{(i)}) \frac{1}{1-h(\mathbf{x}^{(i)})} \frac{\partial}{\partial w_k}\Big(1- \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)}\big) \Big) \Bigg\} \\ & = - \frac{1}{N}\sum_{i=1}^N \Bigg\{y^{(i)} \frac{1}{h(\mathbf{x}^{(i)})} \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)}\big) \cdot \big(1-\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)})\big) \frac{\partial}{\partial w_k} \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)} \big) \\ & \qquad \qquad - (1 - y^{(i)}) \frac{1}{1-h(\mathbf{x}^{(i)})} \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)}\big) \cdot \big(1-\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)})\big) \frac{\partial}{\partial w_k}\big(\mathbf{w}^{\top} \mathbf{x}^{(i)}\big) \Bigg\} \\ & = - \frac{1}{N}\sum_{i=1}^N \Bigg\{y^{(i)} \cdot \big(1-\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)})\big) \frac{\partial}{\partial w_k} \sigma\big(\mathbf{w}^{\top} \mathbf{x}^{(i)} \big) - (1 - y^{(i)}) \cdot \sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)}) \frac{\partial}{\partial w_k}\big(\mathbf{w}^{\top} \mathbf{x}^{(i)}\big) \Bigg\} \\ & =\frac{1}{N}\sum_{i=1}^N \big(\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)}) - y^{(i)} \big) x^{(i)}_k. \end{aligned} $$ The final expression is pretty simple, basically the derivative of the Logistic loss function w.r.t. the $k$-th weight $w_k$ is the sum of the residuals $\big(\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)}) - y^{(i)} \big) $ multiply with the $k$-th component in the $i$-th training data $\mathbf{x}^{(i)}$. 
Therefore the gradient for all the weights $\mathbf{w}$ is then: $$ \nabla_{\mathbf{w}} \big( L (\mathbf{w}) \big) = \sum_{i=1}^N \big(\sigma(\mathbf{w}^{\top} \mathbf{x}^{(i)}) - y^{(i)} \big) \mathbf{x}^{(i)} =\frac{1}{N}\sum_{i=1}^N \big( h(\mathbf{x}^{(i)}) - y^{(i)} \big) \mathbf{x}^{(i)} . \tag{$\dagger$} $$ # Reading 2: Bayesian classification What we have learned above, the logistic regression and softmax regression, are two classification methods that are closely related to Bayesian classifiers. Because essentially, we are trying minimize the following the error associated with a set of observations of the form in a way (by introducing some model with weights): $$ \min_{\mathbf{w}} \Big[\text{Mean of } 1\big\{y^{(i), \text{Pred}} \neq y^{(i), \text{Actual}} \big\} \Big], $$ If there is no model yet, let $K= \# \text{ classes}$. Keep in mind for now there are no weights involved, we simply want to classify the samples into $K$ classes, so that the minimization of problem above is *assigning each sample to the most likely class it belongs to*, given its values (feature vector), i.e., we want to compute $$ \max_{j\in \{1,\dots ,K\}} P\big(y^{(i)}=j | \mathbf{x}^{(i)} \big) \tag{$\diamond$} $$ where $P\big(y^{(i)}=j | \mathbf{x}^{(i)} \big)$ is the conditional probability that the label $y^{(i)}=j$ (the $i$-th sample is in the $j$-th class), given the observed vector $\mathbf{x}^{(i)}$ for the $i$-th sample. This is called the naive Bayes classifier. 
---- ### Naive Bayes classifier Using the definition of the conditional probability: for an arbitrary sample and its label $(\mathbf{x},y)$ $$ P(y=j | \mathbf {x} )={\frac { P( y = j, \mathbf {x})}{P(\mathbf {x} )}} \tag{$\ast$} $$ Assuming $\mathbf{x} = (x_1, x_2, \dots, x_n)$, i.e., each sample has $n$ features, then the numerator above is $ P(y=j)\ P(\mathbf {x} | y = j)$, where $P(y=j)$ is the probability that an arbitrary sample is of class $j$ without any observation $\mathbf{x}$, i.e., $P(y=j)$ is the portion of class $j$ against all all samples. Now using the definition of conditional probability again: $$ \begin{aligned} P(y=j,x_{1},\dots ,x_{n}) &= P(x_{1},\dots ,x_{n},y=j) \\ &= P(x_{1} | x_{2},\dots ,x_{n},y=j) P(x_{2},\dots ,x_{n},y=j) \\ &= P(x_{1} | x_{2},\dots ,x_{n},y=j) P(x_{2} | x_{3},\dots ,x_{n},y=j) P(x_{3},\dots ,x_{n},y=j) \\&=\dots \\&= P(x_{1} | x_{2},\dots ,x_{n},y=j) P(x_{2} | x_{3},\dots ,x_{n},y=j) \dots P(x_{n-1} | x_{n},y=j) P(x_{n}| y=j)P(y=j)\\ \end{aligned} \tag{$\ast\ast$} $$ Assuming each feature is independent from one another, which means whether put $x_l$ ($l\neq i$) into the given observed conditions does not affect the probability of $x_i$: $$ P(x_{i} | x_{i+1},\dots ,x_{n}, y =j) = P(x_{i}| y=j). $$ Since $P(\mathbf{x}) = 1/N$ is a fixed value (assuming uniform distributed sample), we have by $(\ast)$ and $(\ast\ast)$ $$ \begin{aligned} P(y=j | x_{1},\dots ,x_{n}) &\propto P(y=j,x_{1},\dots ,x_{n}) \\ &=P(y=j)\ P(x_{1} | y=j)\ P(x_{2}| y=j)\ P(x_{3} | y=j)\ \cdots \\ &=P(y=j)\prod_{i=1}^{n}P(x_{i}| y=j), \end{aligned} $$ Now for training sample $\mathbf{x}^{(i)}$, the problem becomes: $$ y^{(i), \text{Pred}}={\underset {j\in \{1,\dots ,K\}}{\operatorname {argmax} }}\ P(y = j)\displaystyle \prod _{i=1}^{n} P(x_{i} | y=j), $$ where $y^{(i), \text{Pred}}$ is the class which the probability $(\diamond)$ is maximized. 
---- ### Pitfalls of naive Bayes classifier In reality, there are two main reasons the method above is neither practical nor reasonable. * there is no way $x_i$ and $x_l$ are independent when $i\neq l$ for a sample $\mathbf{x}$. Think in the handwritten digit classification example, $x_i$'s are the pixel intensity at $i$-th location (one of the pixel among 28x28 reshaped into a 784 array), any reasonable ansatz should not assume independency, because the pixel intensity are determined by the strokes. * For real data, we do not know $P(y=j)$'s true value, i.e., percentage of the samples in class $k$, because new data may come in. For the same reason $P(x_{i} | y=j)$ is not known either. Therefore, we introduce a model (an a priori assumption that the data can be described by such a model) with weights $\mathbf{w}$, and the problem changes to (softmax case) the following maximization of the log of the likehood function (or say cross entropy), $$ \max_{\mathbf{w}}\sum_{i=1}^N \left\{\sum_{j=1}^K 1_{\{y^{(i)} = j\}} \ln P\big(y^{(i)}=j | \mathbf{x}^{(i)} ; \mathbf{w} \big) \right\}, $$ in Lecture 15, 16, 17, we will try using gradient descent to minimize the negative version of above.
github_jupyter
# Modeling and Simulation in Python Milestone: Queueing theory Copyright 2017 Allen Downey License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) ``` # If you want the figures to appear in the notebook, # and you want to interact with them, use # %matplotlib notebook # If you want the figures to appear in the notebook, # and you don't want to interact with them, use # %matplotlib inline # If you want the figures to appear in separate windows, use # %matplotlib qt5 # To switch from one to another, you have to select Kernel->Restart %matplotlib inline from modsim import * ``` ### One queue or two? This notebook presents a solution to an exercise from *Modeling and Simulation in Python*. It uses features from the first four chapters to answer a question related to queueing theory, which is the study of systems that involve waiting in lines, also known as "queues". Suppose you are designing the checkout area for a new store. There is room for two checkout counters and a waiting area for customers. You can make two lines, one for each counter, or one line that serves both counters. In theory, you might expect a single line to be better, but it has some practical drawbacks: in order to maintain a single line, you would have to install rope barriers, and customers might be put off by what seems to be a longer line, even if it moves faster. So you'd like to check whether the single line is really better and by how much. Simulation can help answer this question. As we did in the bikeshare model, we'll assume that a customer is equally likely to arrive during any timestep. I'll denote this probability using the Greek letter lambda, $\lambda$, or the variable name `lam`. Since it's a new store, we don't know what the value of $\lambda$ will be, so we'll have to consider a range of possibilities. Based on data from other stores, you know that it takes 5 minutes for a customer to check out, on average. 
But checkout times are highly variable: most customers take less than 5 minutes, but some take substantially more. A simple way to model this variability is to assume that when a customer is checking out, they have the same probability of finishing up during each time step. I'll denote this probability using the Greek letter mu, $\mu$, or the variable name `mu`. If we choose $\mu=1/5$, the average number of time steps for each checkout will be 5 minutes, which is consistent with the data. **Solution** I'll start by defining a `System` object to contain the system parameters. ``` def make_system(lam, mu): return System(lam=lam, mu=mu, x=0, duration=8*60) ``` As an example, I'll set the arrival rate to one customer per 8 minutes. ``` interarrival_time = 8 service_time = 5 lam = 1 / interarrival_time mu = 1 / service_time system = make_system(lam, mu) system ``` Here's a update function that simulates a single time step. During each time step, a customer can finish checking out (but only if there is a customer in the system), and a new customer can arrive. ``` def update_func1(system): """Simulate one time step. system: System object """ # if there's a customer in service, check if they're done if system.x > 0: if flip(system.mu): system.x -= 1 # check for an arrival if flip(system.lam): system.x += 1 ``` Now we can run the simulation. `run_simulation` creates a `TimeSeries` that maps from each time step to the total number of customers in the store, including the one checking out. After the simulation, we compute `L`, which is the average number of customers in the system, and `W`, which is the average time customers spend in the store. `L` and `W` are related by Little's Law: $L = \lambda W$ Where $\lambda$ is the arrival rate. ``` def run_simulation(system, update_func): """Simulate a queueing system. 
system: System object update_func: function object """ results = TimeSeries() for t in linrange(0, system.duration-1): update_func(system) results[t] = system.x system.results = results system.L = results.mean() system.W = system.L / system.lam ``` Here are the results with the parameters we chose. ``` run_simulation(system, update_func1) print(system.L, system.W) plot(system.results) ``` Since we don't know the actual value of $\lambda$, we can sweep through a range of possibilities, from 10% to 80% of the completion rate. If customers arrive faster than the completion rate, the queue grows without bound. In that case the metrics `L` and `W` just depend on how long the store is open. ``` mu = 1 / service_time num_vals = 101 lam_array = linspace(0.1*mu, 0.8*mu, num_vals) print(mu) lam_array ``` The model I chose for this system is a common model in queueing theory, in part because many of its properties can be derived analytically. In particular, we expect the average time in the store to be: $W = 1 / (\mu - \lambda)$ The following function plots the theoretical value of $W$ as a function of $\lambda$. ``` def plot_W(lam_array, mu): """Plot the theoretical mean wait time. lam_array: array of values for `lam` mu: probability of finishing a checkout """ W = 1 / (mu - lam_array) plot(lam_array, W, 'g-') plot_W(lam_array, mu) ``` Now let's run the simulation with a range of values for $\lambda$ and plot the observed value of `W` versus `lam`: ``` def sweep_lam(lam_array, mu, update_func): """Run simulations with a range of values for `lam` Plots wait time, W, versus lam, and prints the average of W across runs. 
lam_array: array of values for `lam` mu: probability of finishing a checkout update_func: passed along to run_simulation """ total = 0 for lam in lam_array: system = make_system(lam, mu) run_simulation(system, update_func) total += system.W plot(lam, system.W) W_avg = total / len(lam_array) print('Average of averages = ', W_avg, 'minutes') ``` If we imagine that this range of values represents arrival rates on different days, we can use the average value of `W`, for a range of values of `lam`, to compare different queueing strategies. Here are the results for a single queue with a single checkout counter. ``` plot_W(lam_array, mu) sweep_lam(lam_array, mu, update_func1) decorate(xlabel='Arrival rate (per minute)', ylabel='Average time in system') ``` The results on any simulated day are highly variable, but looks like the theoretical result is plausible. The simulated results tend to be lower, partly because they include a cold start at the beginning of each day. Now let's try the other two queueing strategies: 1. One queue with two checkout counters. 2. Two queues, one for each counter. The following figure shows the three scenarios: ![](figs/queue.png) Here's the update function for a single queue with two servers. ``` def update_func2(system): """Simulate a single queue with two servers. system: System object """ # if both servers are busy, check whether the # second is complete if system.x > 1 and flip(system.mu): system.x -= 1 # check whether the first is complete if system.x > 0 and flip(system.mu): system.x -= 1 # check for an arrival if flip(system.lam): system.x += 1 ``` Here are the results for a single run. ``` system = make_system(lam, mu) run_simulation(system, update_func2) print(system.L, system.W) plot(system.results) ``` Since we have two counters now, we can consider a wider range of values for $\lambda$ ``` lam_array = linspace(0.1*mu, 1.6*mu, num_vals) ``` Here's what the results look like. 
With two counters, the average time in the store is lower, even for higher values of $\lambda$ ``` sweep_lam(lam_array, mu, update_func2) decorate(xlabel='Arrival rate (per minute)', ylabel='Average time in system') ``` Finally, here's the update function for the scenario with two separate queues. ``` def update_func3(system): """Simulate two queues with one server each. system: System object """ # if the first servers is busy, check it it's done if system.q1 > 0 and flip(system.mu): system.q1 -= 1 # if the second queue is busy, check if it's done if system.q2 > 0 and flip(system.mu): system.q2 -= 1 # check for an arrival if flip(system.lam): # join whichever queue is shorter if system.q1 < system.q2: system.q1 += 1 else: system.q2 += 1 system.x = system.q1 + system.q2 ``` Since we added `q1` and `q2` as system variables, we need a new version of `make_system` ``` def make_system(lam, mu): return System(lam=lam, mu=mu, x=0, duration=8*60, q1=0, q2=0) ``` Here are the results for a single run ``` system = make_system(lam, mu) run_simulation(system, update_func3) print(system.L, system.W) plot(system.results) ``` And here are the results for a range of values of `lam` ``` sweep_lam(lam_array, mu, update_func3) decorate(xlabel='Arrival rate (per minute)', ylabel='Average time in system') ``` With two queues, the average of averages is slightly higher, most of the time. But the difference is small. The two configurations are equally good as long as both servers are busy; the only time two lines is worse is if one queue is empty and the other contains more than one customer. In real life, if we allow customers to change lanes, that disadvantage can be eliminated. From a theoretical point of view, one line is better. From a practical point of view, the difference is small and can be mitigated. So the best choice depends on practical considerations. On the other hand, you can do substantially better with an express line for customers with short service times. 
But that's a topic for another notebook.
github_jupyter
## Grid Manipulations (merge, split, refine, transform) ### Notes Most grid transformations such as `merge` and `transpose` return a new object, allowing consecutive operations to be chained together. Optionally, you can pass `inplace=True` to the call signature to modify the existing object and return `None`. Both approaches are demonstrated below. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas from shapely.geometry import Point, Polygon import geopandas import pygridgen as pgg import pygridtools as pgt ``` ### Basic merging operations The function below create our 3 test model grids moving counter-clockwise in the figure shown two cells down. ``` def to_gdf(df): return ( df.assign(geometry=df.apply(lambda r: Point(r.x, r.y), axis=1)) .drop(columns=['x', 'y']) .pipe(geopandas.GeoDataFrame) ) def make_test_grids(): domain1 = pandas.DataFrame({'x': [2, 5, 5, 2], 'y': [6, 6, 4, 4], 'beta': [1, 1, 1, 1]}) domain2 = pandas.DataFrame({'x': [6, 11, 11, 5], 'y': [5, 5, 3, 3], 'beta': [1, 1, 1, 1]}) domain3 = pandas.DataFrame({'x': [7, 9, 9, 7], 'y': [2, 2, 0, 0], 'beta': [1, 1, 1, 1]}) grid1 = pgt.make_grid(domain=to_gdf(domain1), nx=6, ny=5, rawgrid=False) grid2 = pgt.make_grid(domain=to_gdf(domain2), nx=8, ny=7, rawgrid=False) grid3 = pgt.make_grid(domain=to_gdf(domain3), nx=4, ny=10, rawgrid=False) return grid1, grid2, grid3 ``` Display positions of grids relative to each other ``` grid1, grid2, grid3 = make_test_grids() fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid1.plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) _ = grid2.plot_cells(ax=ax, cell_kws=dict(cmap='Greens')) _ = grid3.plot_cells(ax=ax, cell_kws=dict(cmap='Reds')) ``` #### Merge grids 1 and 2 together, horizontally By default, the bottom rows are aligned and the cell mask is not updated. We do that manually for now. 
``` one_two = grid1.merge(grid2, how='horiz') fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = one_two.plot_cells(ax=ax, cell_kws=dict(cmap='BuPu')) _ = grid3.plot_cells(ax=ax, cell_kws=dict(cmap='Reds')) ``` #### Use the shift parameter to center grid 2 Use `shift=-1` since we're sliding grid 2's i-j indexes downward relative to grid 1 ``` one_two = grid1.merge(grid2, how='horiz', shift=-1) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = one_two.plot_cells(ax=ax, cell_kws=dict(cmap='BuPu')) _ = grid3.plot_cells(ax=ax, cell_kws=dict(cmap='Reds')) ``` #### Vertically merge grid 2 and grid 3 Notice that by default, the grids are left-aligned and the *bottom* of grid 3 ties into the *top* of grid 2 ``` two_three = grid2.merge(grid3, how='vert', shift=2) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid1.plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) _ = two_three.plot_cells(ax=ax, cell_kws=dict(cmap='YlOrBr')) ``` #### Try again, switching the order of the grids Notice the change in sign of the `shift` parameter. ``` two_three = grid3.merge(grid2, how='vert', shift=-2) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid1.plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) _ = two_three.plot_cells(ax=ax, cell_kws=dict(cmap='YlOrBr')) ``` #### Alternatively, you can switch the arguments and use `where='-'` to indicate that the "other" grid is below the first. And the sign of the `shift` parameter returns to its original value. ``` two_three = grid2.merge(grid3, how='vert', where='-', shift=2) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid1.plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) _ = two_three.plot_cells(ax=ax, cell_kws=dict(cmap='YlOrBr')) ``` #### Now merge all three in a single chained operation (`inplace=False`). 
``` grid1, grid2, grid3 = make_test_grids() all_grids = ( grid2.merge(grid3, how='vert', where='-', shift=2) .merge(grid1, how='horiz', where='-', shift=11) ) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = all_grids.plot_cells(ax=ax, cell_kws=dict(cmap='GnBu')) ``` ### Split the final grid into two vertical parts `grid.split(<index of split>, axis=0)` ``` grid_bottom, grid_top = all_grids.split(14, axis=0) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid_bottom.plot_cells(ax=ax, cell_kws=dict(cmap='OrRd')) _ = grid_top.plot_cells(ax=ax, cell_kws=dict(cmap='BuPu')) ``` ### Splitting and linearly refining columns and rows #### Split the final grid into two horizontal parts `grid.split(<index of split>, axis=1)` ``` grid_left, grid_right = all_grids.split(8, axis=1) fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = grid_left.plot_cells(ax=ax, cell_kws=dict(cmap='Oranges')) _ = grid_right.plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) ``` #### Refine individual rows of the grid cells `grid.refine(<index of cell>, axis=0, n_points=<num. of divisions>)` ``` fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = ( all_grids .insert(13, axis=0, n_nodes=2) .plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) ) ``` #### Refine individual columns of the grid cells `grid.refine(<index of cell>, axis=1, n_points=<num. 
of divisions>)` ``` fig, ax = plt.subplots(figsize=(7.5, 7.5)) _ = ( all_grids .insert(10, axis=1, n_nodes=4) .plot_cells(ax=ax, cell_kws=dict(cmap='Blues')) ) ``` ### Chained operations #### One big chained operation for fun ``` def make_fake_bathy(grid): j_cells, i_cells = grid.cell_shape y, x = np.mgrid[:j_cells, :i_cells] z = (y - (j_cells // 2))** 2 - x return z fig, ax = plt.subplots(figsize=(7.5, 7.5)) g = ( grid2.merge(grid3, how='vert', where='-', shift=2) .merge(grid1, how='horiz', where='-', shift=11) .insert(10, axis=1, n_nodes=4) .insert(13, axis=0, n_nodes=2) .transform(lambda x: x*5 + 2) ) bathy = make_fake_bathy(g) _ = g.plot_cells(ax=ax, cell_kws=dict(cmap='Blues', colors=bathy)) ```
github_jupyter
```
import osmnx as ox
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as colors
import json

# Load the road network obtained in Part 1 ("Simple map-matching algorithm
# preprocessing"), along with one sub-trace of the San Francisco taxi dataset.
G = ox.load_graphml('graph.graphml')

# (Kept for reference) one-off preprocessing that snapped each raw GPS fix to
# the nearest graph node and dumped the result to JSON:
#list1 = list()
#with open("D:\\cabspottingdata\\cabspottingdata\\new_acduou.txt",'r') as f:
#    for line in f:
#        co = line.split()
#        list1.append((ox.get_nearest_node(G,(float(co[0]),float(co[1]))),co[2]))
#    tuple1 = tuple(list1)
#    json2 = json.dump(tuple1,open('json2','w'))

# Locate the start and end points of one trip in the loaded San Francisco data
# (the raw records carry an occupancy flag: 0 = vacant cab, 1 = carrying a
# passenger), use OSMnx/NetworkX to get the shortest path on the network between
# those two points, and plot it so it can be compared with the actual route.
G_pro = ox.load_graphml('pro_graph.graphml')
route = nx.shortest_path(G_pro,1580501206,65373465)
print(G_pro.node[1580501206])
print(G_pro.node[65373465])
ox.plot_graph_route(G_pro, route,bbox=(4182056,4183367,551652,553562))

# Visualize the actual route, one recorded GPS segment at a time; each block
# below plots the shortest path between two consecutive matched nodes.
route = nx.shortest_path(G_pro,1580501206,552853360)
print(G_pro.node[1580501206])
print(G_pro.node[552853360])
ox.plot_graph_route(G_pro, route,bbox=(4182056,4182568,551652,552285))

route = nx.shortest_path(G_pro,552853360,65314146)
print(G_pro.node[552853360])
print(G_pro.node[65314146])
ox.plot_graph_route(G_pro, route,bbox=(4182208,4182740,551925,552684))

route = nx.shortest_path(G_pro,65314146,65305815)
print(G_pro.node[65314146])
print(G_pro.node[65305815])
ox.plot_graph_route(G_pro, route,bbox=(4182380,4182972,552324,552795))

route = nx.shortest_path(G_pro,65305815,1723738831)
print(G_pro.node[65305815])
print(G_pro.node[1723738831])
ox.plot_graph_route(G_pro, route,bbox=(4182612,4183058,552435,553157))

route = nx.shortest_path(G_pro,1723738831,65336941)
print(G_pro.node[1723738831])
print(G_pro.node[65336941])
ox.plot_graph_route(G_pro, route,bbox=(4182678,4183478,552777,553199))

route = nx.shortest_path(G_pro,65336941,65328969)
print(G_pro.node[65336941])
print(G_pro.node[65328969])
ox.plot_graph_route(G_pro, route,bbox=(4183078,4183672,552799,553168))

route = nx.shortest_path(G_pro,65328969,65373364)
print(G_pro.node[65328969])
print(G_pro.node[65373364])
ox.plot_graph_route(G_pro, route,bbox=(4183172,4183741,552668,553538))

route = nx.shortest_path(G_pro,65373364,65373362)
print(G_pro.node[65373364])
print(G_pro.node[65373362])
ox.plot_graph_route(G_pro, route,bbox=(4183141,4183655,552938,553601))

route = nx.shortest_path(G_pro,65373362,65373465)
print(G_pro.node[65373362])
print(G_pro.node[65373465])
ox.plot_graph_route(G_pro, route,bbox=(4183155,4183582,553101,553582))

# Use the functions provided by OSMnx/NetworkX to compute various graph
# attributes of the San Francisco network, record and save them; pick a few
# attributes considered important (e.g. node centrality) and display them
# visually.

# Node closeness centrality for every node in the graph.
node_centrality = nx.closeness_centrality(G_pro)

# Visualize node centrality (color-mapped by rank) and save the figure locally
# as an SVG vector image.
df = pd.DataFrame(data=pd.Series(node_centrality).sort_values(), columns=['cc'])
df['colors'] = ox.get_colors(n=len(df), cmap='inferno', start=0.2)
# Reindex back to graph node order so colors line up with plot_graph's nodes.
df = df.reindex(G_pro.nodes())
nc = df['colors'].tolist()
ox.plot_graph(G_pro, bgcolor='k', save=True, file_format='svg', filename='picture',
              node_size=30, node_color=nc, node_edgecolor='none', node_zorder=2,
              edge_color='#555555', edge_linewidth=1.5, edge_alpha=1)
```
github_jupyter
# TASK #1: UNDERSTAND VARIABLES ASSIGNMENT ``` # Define a variable named "x" and assign a number (integer) to it # integer is a whole number (no decimals) that could be positive or negative x = 20 # Let's view "x" print(x) # Define a variable named "y" and assign a number (float) to it # Float are real numbers with a decimal point dividing the integer and fractional parts y = 35.20 # Let's view "y" print(y) # Let's overwrite "y" (assume your portfolio value increased) y= y + 20 # Notice that "y" will only contain the most recent value print(y) # Get the type of "x" which is integer # integer is a whole number (no decimals) that could be positive or negative type(x) # Get the type of "y" which is float # Float are real numbers with a decimal point dividing the integer and fractional parts type(y) ``` MINI CHALLENGE #1: - We defined a variable z and we assigned these 4 values listed below to it. Without executing any code cells, what will these lines of code generate? - Verify your answer by executing the code cells ``` z = 1000 z = 2000 z = 5000 z = 6000 z ``` ``` z = 1000 z = 2000 z = 5000 z = 6000 print(z) ``` # TASK #2: PERFORM MATH OPERATIONS IN PYTHON ``` # Define a variable named i and initialize it with 20 # Let's assume that we want to increment the value by 4 i = 20 i += 4 i # Let's assume that you own a little grocery store # The price of 1 bottle of milk is $3 and we currently have 50 bottles # We can calculate the total dollar value of our inventory as follows: milk = 3 Total_bottle = 50 total_value = milk * Total_bottle total_value # Let's assume you have $550 USD in our bank account # We want to buy x number of IBM stocks using the total amount # each IBM stock is priced at $128 each Account_balance = 550 IBM_stock = 128 X_unit = Account_balance / IBM_stock X_unit # Divide the account balance by Amazon stock price and place the answer in units Account_balance = 600 Amazon_share = 2200 unit = Account_balance / Amazon_share unit ``` MINI CHALLENGE #2: - 
Write a code that takes in APPLE (AAPL) stock prices at two days and calculate the return: - AAPL price on day 1 = \$135 - AAPL price on day 2 = \$150 ``` Apple_day1 = 135 Apple_day2 = 150 Profite = Apple_day2 / Apple_day1 print(Profite) Difference = Apple_day2 - Apple_day1 Percentage = (Difference / Apple_day1)*100 print(Percentage) ``` # TASK #3: UNDERSTAND PRINT AND INPUT OPERATIONS ``` # Print function is used to print elements on the screen # Define a string x # A string in Python is a sequence of characters # String in python are surrounded by single or double quotation marks x = 'Roshan' # Obtain the data type for 'x' print(x) type(x) # The format() method formats the specified value and insert it in the placeholder # The placeholder is defined using curly braces: {} company_name = "Amazon" shares = 200 print("I own {}'s total shares of {}".format(shares,company_name)) # input is a built-in function in python # Obtain client data such as name, country and e-mail and print them all out on the screen name = input("Enter your name: ") country = input("Enter the country: ") email = input("Enter your email: ") print("my name is {}, I am from {} and my email is {}".format(name,country,email)) ``` MINI CHALLENGE #3: - Write a code that takes in the name of the stock, price at which it is selling, the number of stocks that you want to own and prints out the total funds required to buy this stock. Find a sample expected output below: - Enter the price of the stock you want to buy: 3000 - Enter the number of stocks that you want to buy: 5 - Enter the name of the stock that you want to buy: AMZN - The total funds required to buy 5 number of AMZN stocks at 3000 is: 15000 ``` stock_name = input("ENTER THE NAME OF THE STOCK ") STOCK_PRICE = int(input("Enter the stock's price ")) stocks_buy = int(input("How many stocks you want? 
")) total_fund = STOCK_PRICE * stocks_buy print(total_fund) print("You need total of {} amount to buy unit {} of {} shares at the rate of {}".format(total_fund,stocks_buy,stock_name,STOCK_PRICE)) ``` # TASK #4: UNDERSTAND LISTS DATA TYPES ``` # A list is a collection which is ordered and changeable. # List allows duplicate members. List1 = ["name", "email" , 25] print(List1) # Obtain the datatype type(List1) # Access specific elements in the list with Indexing # Note that the first element in the list has an index of 0 (little confusing but you'll get used to it!) print(List1[2]) ``` MINI CHALLENGE #4: - Print the first, second and last element in the list below ``` grocery_list = ['milk', 'rice', 'eggs', 'bread', 'oranges', 'water'] ``` ``` grocery_list = ['milk', 'rice', 'eggs', 'bread', 'oranges', 'water'] print(grocery_list[0]) print(grocery_list[1:3][0][2]) print(grocery_list[-1]) print(grocery_list[5]) ``` # TASK #5: UNDERSTAND COMPARISON OPERATORS AND CONDITIONAL STATEMENTS ``` # Comparison Operator output could be "True" or "False" # Let's cover equal '==' comparison operator first # It's simply a question: "Is x equals y or not?" # "True" output means condition is satisfied # "False" output means Condition is not satisfied (condition is not true) x = 50 y = 60 x == y # Greater than or equal operator '>=' x = 40 y = 30 x >= y # Note that '==' is a comparison operator # Note that '=' is used for variable assignment (put 10 in x) x = 10 x == 10 ``` - A simple if-else statement is written in Python as follows: ``` if condition: statement #1 else: statement #2 ``` - If the condition is true, execute the first indented statement - if the condition is not true, then execute the else indented statements. - Note that Python uses indentation (whitespace) to indicate code sections and scope. 
``` # Let's take an input from the user and grant or deny access accordingly user_input = int(input("Enter the number:")) if (user_input <= 50): print("You have been granted the access") else: print("You have been denied the acces") x = int(input(" Enter an integer from 1 to 1000: ")) if x%2 == 0: print("Number is even") else: print("Number is odd") ``` MINI CHALLENGE #5: - Write a code that takes a number from the user and indicates if it's positive or negative ``` number = float(input("Enter the number: ")) if number < 0: print("Number is negative") elif number == 0: print("Number is zero") else: print("Number is positive") ``` # TASK #6: DEVELOP FUNCTIONS IN PYTHON ``` # Define a function that takes in two argument x and y and returns their multiplication def multiply(x,y): z = x*y return z # Call the function multiply(2,8) ``` MINI CHALLENGE #6: - Write a code that takes in three inputs from the user and calculate their sum ``` def sum(): a=int(input("Enter the number: ")) b=int(input("Enter the second number: ")) c=int(input("Enter the third number: ")) return a+b+c sum() def sum(e,f,j): return e+f+j a=int(input("Enter the number: ")) b=int(input("Enter the second number: ")) c=int(input("Enter the third number: ")) sum(a,b,c) ``` # TASK #7: UNDERSTAND FOR AND WHILE LOOPS ``` # List of strings for i in range(10): print(i) g_list= ["milk", "eggs", "rice", "toothpaste", "bread", "oranges", "water"] for i in g_list: print(i) print("Hello {}".format(i)) # Range() generates a list of numbers, which is used to iterate over with for loops. # range() is 0-index based, meaning list indexes start at 0, not 1. # The last integer generated by range() is up to, but not including, last element. # Example: range(0, 7) generates integers from 0 up to, but not including, 7. for i in range(1,6): print() # While loop can be used to execute a set of statements as long as a certain condition holds true. 
i = 6 while(i<10): print(i) i=i+1 ``` MINI CHALLENGE #7: - Write a code that displays numbers from 1 to 10 using for and while loops ``` for num in range(1,11): print(num) i = 1 while i <= 10: print(i) i+=1 ``` # TASK #8: CAPSTONE PROJECT Develop a guessing game that performs the following: - The system will automatically generate a random number between 1 and 100. - Users can insert any number between 1 and 100 - The program shall be able to compare the number generated by the system and the number that has been entered by the user. The program shall print out one of the following options to help the user improve their next guess: - You are right, great job! - Your guess is low, try again! - your guess is high, try again! - The program exits when the user guess matches the number generated by the system ``` import random sys_number = random.randint(1,100) sys_number number= int(input("Enter your number between 1 and 100: ")) number ``` ``` while (True): if sys_number == number: print("You are right, great job!") break elif sys_number > number: print(" Your guess is low, try again!") number= int(input("Enter your number between 1 and 100: ")) else: print("Your guess is high, try again") number = int(input("Enter your number between 1 and 100: ")) ```
github_jupyter
# Sample Size Experiment using Random Forest and Deep Networks ### Random Forest (RF) vs. Deep Networks (DN) Random forest is inherently a non-parametric model, meaning that the algorithm requires no assumptions about the data distribution. With infinitely many trees and n &rarr; $\infty$, RF will follow non-parametric behavior and will guarantee convergence. Deep Networks with a fixed architecture are entirely parametric. As presented by [Vogelstein, et al. (2020)](https://www.biorxiv.org/content/10.1101/2020.04.29.068460v1), there is a visible bias-variance tradeoff between DNs of varying complexity. This is evident by testing each model over a range of sample sizes. At a large enough sample size, an RF model will surpass any parametric DN. The goal of this tutorial is to identify a joint distribution (X,Y) that demonstrates this relationship. RF should produce a smaller generalization error at small sample sizes, a specific parametric DN should produce a smaller generalization error at medium sample sizes, and RF should once again produce a smaller generalization error at large sample sizes. ### Import necessary packages and modules ``` from functions.sample_size_functions import * import numpy as np import matplotlib.pyplot as plt from scipy import stats %matplotlib inline ``` ### Sparse Parity Distribution The joint distribution used to demonstrate RF convergence is sparse parity. Sparse parity is a _p_-dimensional binary classification problem that generalizes the noisy XOR distribution. Data is generated from a _p_-dimensional feature vector, where each _X_<sub>1</sub>, ... , _X_<sub>p</sub> ~ i.i.d. _U_(-1,1). A parameter _p_* represents the number of informative dimensions, where _p_* < _p_. Class label _Y_ = 0 if there are an even number of positive values among the first _p_* < _p_ dimensions, and _Y_ = 1 if not. Mathematically, we can let _Q_ = $\sum_{j=1}^{p*}$I ( X<sub>j</sub> > 0 ) where _p_* < _p_. 
The function I ( _X_<sub>j</sub> > 0 ) represents the indicator that the feature at position _j_ is greater than 0. Class label _Y_ returns 1 if _Q_ is odd, and 0 if _Q_ is even. ``` X, y = sparse_parity(num_samples=500, p=5, p_star=2) ``` ### Visualize Sparse Parity Plot the first and second dimensions of the sparse parity distribution. For this plot, `p` = 5 and `p_star` = 2. With only 2 informative dimensions, this plot is equivalent to that of the noisy XOR distribution. ``` fig = plt.figure(figsize=(9, 9)) plt.scatter(X[:, 0], X[:, 1], c=y, cmap="coolwarm") plt.ylabel("X2", fontsize=24) plt.xlabel("X1", fontsize=24) plt.yticks([-1, 0, 1], fontsize=20) plt.xticks([-1, 0, 1], fontsize=20) plt.title("sparse parity: p=5, p*=2", fontsize=24); ``` ### Define Experiment Parameters and Model Hyperparameters #### The cell below defines the sparse parity distribution parameters: `p`: The number of total dimensions in the sparse parity distribution `p_star`: The number of informative dimensions in the sparse parity distribution ``` # Sparse parity parameters p = 14 p_star = 3 ``` #### The cell below defines the RF and DF hyperparameters: `num_trees`: The number of trees in the RF model `max_depth`: Max depth of the RF model `rf_verbose`: The printed output of the RF model `hidden_nodes`: The number of nodes in the hidden layer of the DN `batch_size`: The batch size of the DN `dnn_verbose`: The printed output of the DN model ``` # RF hyperparameters num_trees = 500 max_depth = None rf_verbose = 0 # DN hyperparameters hidden_nodes = 4 batch_size = 3 dn_verbose = 0 ``` #### The cell below defines experiment parameters: `training_sample_sizes`: A list of training set sample sizes to iterate over while training the model `testing_sample_size`: An integer designating the size of the test set `trials`: Number of trials to run the experiment ``` # Experiment parameters training_sample_sizes = [ 500, 1000, 2000, 3000, 5000, 7000, 10000, 12000, 14000, 17000, 20000, ] 
testing_sample_size = 8000 trials = 5 ``` ### Run the Testing Suite The testing suite trains RF and DN models across all sample sizes and averages accuracies across trials ``` rf_evolution, dn_evolution = test_suite( training_sample_sizes=training_sample_sizes, testing_sample_size=testing_sample_size, trials=trials, p=p, p_star=p_star, num_trees=num_trees, max_depth=None, rf_verbose=rf_verbose, hidden_nodes=hidden_nodes, batch_size=batch_size, dn_verbose=dn_verbose, ) ``` ### Plot and Visualize the Results ``` plot_sample_size_experiment(rf_evolution, dn_evolution, training_sample_sizes, 14, 3) ``` ### Load the Stored Model (Trained with 100 Trials) Increasing the number of trials improves the smoothness of the output, but takes additional time to run. The below cell loads in a model trained with 100 trials. ``` %store -r rf_evolution_100_trials %store -r dn_evolution_100_trials ``` ### Plot and Visualize the Results of 100 Trial Output ``` plot_sample_size_experiment( rf_evolution_100_trials, dn_evolution_100_trials, training_sample_sizes, 14, 3 ) ``` ### Plot and Visualize Alternate Solution An equivalent solution was found using sparse parity parameters `p` = 20 and `p_star` = 2. This leads to a faster convergence and reduced training time. Parameters for this experimental setting are shown below: #### Sparse parity parameters: `p` = 20 `p_star` = 2 #### RF hyperparameters: `num_trees` = 500 `max_depth` = None #### DN hyperparameters: `hidden_nodes` = 36 `batch_size` = 2 #### Experimental parameters: `training_sample_sizes` = [ 200, 300, 400, 500, 700, 1000, 1500, 2000, 2500, 3000, 4000 ] `testing_sample_size` = 2000 `trials` = 100 ``` %store -r rf_evolution_alt %store -r dn_evolution_alt training_sample_sizes = [200,300,400,500,700,1000,1500,2000,2500,3000,4000] plot_sample_size_experiment( rf_evolution_alt, dn_evolution_alt, training_sample_sizes, 20, 2 ) ```
github_jupyter
![DeepNeuro](https://github.com/QTIM-Lab/DeepNeuro/raw/master/package_resources/logos/DeepNeuro_alt.PNG?raw=true) # Wrapping Around External Packages with DeepNeuro Conducting machine learning research in the field of medical imaging poses unique challenges. One of the most pernicious challenges is the wide variety of supporting software needed to preprocess and analyze medical imaging data. These packages are often open-source and maintained by academic labs, but may have complicated installation instructions or only work on certain platforms. Furthermore, different scanning modalities and even sequences can require very different preprocessing steps, requiring users to install many different software packages in multi-modality workflows. DeepNeuro seeks to simplify some of these problems by creating Docker containers for common software packages used in medical imaging, and writing easy-to-use Python wrappers around these Docker containers. This lets you quickly use well-known, state-of-the-art methods without having to spend days with a technician installing their requirements. In this tutorial, we will use some of these wrappers to preprocess MR data, create segmentations using a trained neural network, and then save these segmentations to DICOM Segmentation Object (DSO) format. We will use wrappers around the open-source medical imaging package 3DSlicer, and the open-source DICOM conversion package _dcmqi_. We will also use the DeepNeuro docker container to use trained deep learning models for skull-stripping as part of our pipeline. ![3D Slicer](./resources/3D_Slicer_Logo.png?raw=true) <p style="text-align: center"><a href="https://www.slicer.org/"><em>Learn more about 3D Slicer here!</em></a></p> ### Tutorial Requirements You will need to have Docker already installed for this tutorial. You can find some instructions on how to do that here: https://docs.docker.com/install/. 
Some tutorials on how to use Docker can be found here: https://docker-curriculum.com/. In order to run these Docker containers on the GPU, you will also have to install nvidia-docker. nvidia-docker is an extension to Docker that lets you seamlessly hook up your docker containers to your NVIDIA GPU drivers and supporting software. You can find instructions on how to install nvidia-docker here: https://github.com/NVIDIA/nvidia-docker. ## Downloading Sample Data Our first step is to download some sample data. We will download some DICOM data from the TCGA-GBM dataset on the Cancer Imaging Archive (TCIA). ``` from deepneuro.load.load import load load('sample_gbm_dicom', output_datapath='./Sample_Data') ``` ## Pulling the DeepNeuro Docker This dataset is comprised of four MR sequences from one visit of a patient with a high-grade glioma. These sequences are not guaranteed to be oriented in the same patient space, or even be in the same resolution. We also do not have much information about the specific scanner or choices made during sequence development for these images, meaning that voxel intensities may not be in the range we typically expect them to be. If we want to use these sequences together, they will first need to be preprocessed into a common space and intensity range. Ordinarily, these preprocessing steps are coordinated for you in DeepNeuro modules. However, for this tutorial, we will start from scratch and code the preprocessing steps ourselves. We will do this using the base DeepNeuro Docker container. This container has all of the medical imaging software that DeepNeuro uses pre-installed, and all of the pre-trained models DeepNeuro has to offer pre-downloaded. Our first step will be to pull the Docker container for DeepNeuro from DockerHub. ``` !docker pull qtimlab/deepneuro ``` Easy enough! Let's get on to coding. ## Developing with Docker ## Loading Input DICOM Data ## Applying Preprocessing Steps with 3DSlicer and Pretrained Models ## Saving to DSO Format Using _dcmqi_
github_jupyter
``` #IMPORT SEMUA LIBRARY DISINI #IMPORT LIBRARY PANDAS import pandas as pd #IMPORT LIBRARY POSTGRESQL import psycopg2 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT #IMPORT LIBRARY CHART from matplotlib import pyplot as plt from matplotlib import style #IMPORT LIBRARY PDF from fpdf import FPDF #IMPORT LIBRARY BASEPATH import io #IMPORT LIBRARY BASE64 IMG import base64 #IMPORT LIBRARY NUMPY import numpy as np #IMPORT LIBRARY EXCEL import xlsxwriter #IMPORT LIBRARY SIMILARITAS import n0similarities as n0 #FUNGSI UNTUK MENGUPLOAD DATA DARI CSV KE POSTGRESQL def uploadToPSQL(host, username, password, database, port, table, judul, filePath, name, subjudul, dataheader, databody): #TEST KONEKSI KE DATABASE try: for t in range(0, len(table)): #DATA DIJADIKAN LIST rawstr = [tuple(x) for x in zip(dataheader, databody[t])] #KONEKSI KE DATABASE connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=database) cursor = connection.cursor() connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT); #CEK TABLE cursor.execute("SELECT * FROM information_schema.tables where table_name=%s", (table[t],)) exist = bool(cursor.rowcount) #KALAU ADA DIHAPUS DULU, TERUS DICREATE ULANG if exist == True: cursor.execute("DROP TABLE "+ table[t] + " CASCADE") cursor.execute("CREATE TABLE "+table[t]+" (index SERIAL, tanggal date, total varchar);") #KALAU GA ADA CREATE DATABASE else: cursor.execute("CREATE TABLE "+table[t]+" (index SERIAL, tanggal date, total varchar);") #MASUKAN DATA KE DATABASE YANG TELAH DIBUAT cursor.execute('INSERT INTO '+table[t]+'(tanggal, total) values ' +str(rawstr)[1:-1]) #JIKA BERHASIL SEMUA AKAN MENGHASILKAN KELUARAN BENAR (TRUE) return True #JIKA KONEKSI GAGAL except (Exception, psycopg2.Error) as error : return error #TUTUP KONEKSI finally: if(connection): cursor.close() connection.close() #FUNGSI UNTUK MEMBUAT CHART, DATA YANG DIAMBIL DARI DATABASE DENGAN MENGGUNAKAN ORDER DARI TANGGAL DAN JUGA LIMIT #DISINI JUGA 
MEMANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF
# Pull each table back out of PostgreSQL (the newest `limitdata` rows by date),
# render bar/line/pie charts for it and save them as PNGs, then hand the fetched
# rows to makeExcel and makePDF to build the report files.
def makeChart(host, username, password, db, port, table, judul, filePath, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, wilayah, tabledata, basePath):
    try:
        datarowsend = []
        for t in range(0, len(table)):
            # connect to the database
            connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db)
            cursor = connection.cursor()
            # fetch the most recent `limitdata` rows, ordered by date
            postgreSQL_select_Query = "SELECT * FROM "+table[t]+" ORDER BY tanggal DESC LIMIT " + str(limitdata)
            cursor.execute(postgreSQL_select_Query)
            mobile_records = cursor.fetchall()
            uid = []
            lengthx = []
            lengthy = []
            # unpack rows into (index, date, value) columns
            for row in mobile_records:
                uid.append(row[0])
                lengthx.append(row[1])
                lengthy.append(row[2])
            datarowsend.append(mobile_records)
            # chart title: category + region name
            judulgraf = A2 + " " + wilayah[t]
            # bar chart
            style.use('ggplot')
            fig, ax = plt.subplots()
            ax.bar(uid, lengthy, align='center')
            ax.set_title(judulgraf)
            ax.set_ylabel('Total')
            ax.set_xlabel('Tanggal')
            ax.set_xticks(uid)
            ax.set_xticklabels((lengthx))
            b = io.BytesIO()
            # render the chart to an in-memory PNG, then base64-encode it
            plt.savefig(b, format='png', bbox_inches="tight")
            barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # line chart
            plt.plot(lengthx, lengthy)
            plt.xlabel('Tanggal')
            plt.ylabel('Total')
            plt.title(judulgraf)
            plt.grid(True)
            l = io.BytesIO()
            plt.savefig(l, format='png', bbox_inches="tight")
            lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # pie chart
            plt.title(judulgraf)
            plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%', shadow=True, startangle=180)
            plt.plot(legend=None)
            plt.axis('equal')
            p = io.BytesIO()
            plt.savefig(p, format='png', bbox_inches="tight")
            pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # write the three charts to disk as PNG files
            # bar chart
            bardata = base64.b64decode(barChart)
            barname = basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[t]+'-bar.png'
            with open(barname, 'wb') as f:
                f.write(bardata)
            # line chart
            linedata = base64.b64decode(lineChart)
            linename = basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[t]+'-line.png'
            with open(linename, 'wb') as f:
                f.write(linedata)
            # pie chart
            piedata = base64.b64decode(pieChart)
            piename = basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[t]+'-pie.png'
            with open(piename, 'wb') as f:
                f.write(piedata)
        # build the Excel report
        makeExcel(datarowsend, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, name, limitdata, table, wilayah, basePath)
        # build the PDF report
        # NOTE(review): barChart/lineChart/pieChart passed here are whatever the
        # LAST loop iteration left behind; makePDF re-reads the per-table PNGs
        # from disk anyway, so the base64 arguments look unused downstream.
        makePDF(datarowsend, judul, barChart, lineChart, pieChart, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, table, wilayah, basePath)
    # if the connection fails
    except (Exception, psycopg2.Error) as error :
        print (error)
    # close the connection
    finally:
        # NOTE(review): if connect() fails on the very first iteration,
        # `connection` is unbound here and this raises NameError; only the last
        # connection opened by the loop is closed.
        if(connection):
            cursor.close()
            connection.close()

# Build the PDF report with FPDF: a metadata table, the per-region data table,
# and one page of bar/line/pie chart images per table.
def makePDF(datarow, judul, bar, line, pie, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, lengthPDF, table, wilayah, basePath):
    # landscape page, sized in millimetres
    pdf = FPDF('L', 'mm', [210,297])
    # add a page to the PDF
    pdf.add_page()
    # set font and position, then print the title
    pdf.set_font('helvetica', 'B', 20.0)
    pdf.set_xy(145.0, 15.0)
    pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0)
    # set font and position, then print the subtitle
    pdf.set_font('arial', '', 14.0)
    pdf.set_xy(145.0, 25.0)
    pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0)
    # rule under the subtitle
    pdf.line(10.0, 30.0, 287.0, 30.0)
    pdf.set_font('times', '', 10.0)
    pdf.set_xy(17.0, 37.0)
    pdf.set_font('Times','B',11.0)
    pdf.ln(0.5)
    th1 = pdf.font_size
    # metadata table: one label/value row per report attribute
    pdf.cell(100, 2*th1, "Kategori", border=1, align='C')
    pdf.cell(177, 2*th1, A2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Region", border=1, align='C')
    pdf.cell(177, 2*th1, B2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Frekuensi", border=1, align='C')
    pdf.cell(177, 2*th1, C2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Unit", border=1, align='C')
    pdf.cell(177, 2*th1, D2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Sumber", border=1, align='C')
    pdf.cell(177, 2*th1, E2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Status", border=1, align='C')
    pdf.cell(177, 2*th1, F2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "ID Seri", border=1, align='C')
    pdf.cell(177, 2*th1, G2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Kode SR", border=1, align='C')
    pdf.cell(177, 2*th1, H2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Tanggal Obs. Pertama", border=1, align='C')
    pdf.cell(177, 2*th1, str(I2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Tanggal Obs. Terakhir ", border=1, align='C')
    pdf.cell(177, 2*th1, str(J2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Waktu pembaruan terakhir", border=1, align='C')
    pdf.cell(177, 2*th1, str(K2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.set_xy(17.0, 125.0)
    pdf.set_font('Times','B',11.0)
    # split the effective page width over one label column + lengthPDF date columns
    epw = pdf.w - 2*pdf.l_margin
    col_width = epw/(lengthPDF+1)
    pdf.ln(0.5)
    th = pdf.font_size
    # header row of the data table: "Wilayah" + one column per date
    pdf.cell(col_width, 2*th, str("Wilayah"), border=1, align='C')
    # the date headers come from the first table's rows
    for row in datarow[0]:
        pdf.cell(col_width, 2*th, str(row[1]), border=1, align='C')
    pdf.ln(2*th)
    # body of the data table: one row per region/table
    for w in range(0, len(table)):
        data=list(datarow[w])
        pdf.set_font('Times','B',10.0)
        pdf.set_font('Arial','',9)
        pdf.cell(col_width, 2*th, wilayah[w], border=1, align='C')
        # values by date
        for row in data:
            pdf.cell(col_width, 2*th, str(row[2]), border=1, align='C')
        pdf.ln(2*th)
    # image pages: one page of bar/line/pie charts per table
    for s in range(0, len(table)):
        col = pdf.w - 2*pdf.l_margin
        pdf.ln(2*th)
        widthcol = col/3
        # add a page
        pdf.add_page()
        # chart images from the directory written by makeChart above
        pdf.image(basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[s]+'-bar.png', link='', type='',x=8, y=80, w=widthcol)
        pdf.set_xy(17.0, 144.0)
        col = pdf.w - 2*pdf.l_margin
        pdf.image(basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[s]+'-line.png', link='', type='',x=103, y=80, w=widthcol)
        pdf.set_xy(17.0, 144.0)
        col = pdf.w - 2*pdf.l_margin
        pdf.image(basePath+'jupyter/CEIC/03. Sosio dan Demografi/img/'+name+''+table[s]+'-pie.png', link='', type='',x=195, y=80, w=widthcol)
        pdf.ln(4*th)
    # write the PDF to disk
    pdf.output(basePath+'jupyter/CEIC/03. Sosio dan Demografi/pdf/'+A2+'.pdf', 'F')

# Write the fetched data to an Excel workbook: a header row of metadata labels
# plus the date columns, then one row per region/table (built with xlsxwriter).
def makeExcel(datarow, A2, B2, C2, D2, E2, F2, G2, H2, I2, J2, K2, name, limit, table, wilayah, basePath):
    # create the Excel file
    workbook = xlsxwriter.Workbook(basePath+'jupyter/CEIC/03. Sosio dan Demografi/excel/'+A2+'.xlsx')
    # create the worksheet
    worksheet = workbook.add_worksheet('sheet1')
    # cell formats: bordered+bold for the header, bordered for the body
    row1 = workbook.add_format({'border': 2, 'bold': 1})
    row2 = workbook.add_format({'border': 2})
    # header labels for the table
    header = ["Wilayah", "Kategori","Region","Frekuensi","Unit","Sumber","Status","ID Seri","Kode SR","Tanggal Obs. Pertama","Tanggal Obs. Terakhir ","Waktu pembaruan terakhir"]
    # append one date header per row of the first table
    for rowhead2 in datarow[0]:
        header.append(str(rowhead2[1]))
    # write the header row cell by cell
    for col_num, data in enumerate(header):
        worksheet.write(0, col_num, data, row1)
    # write the body: one row per region/table
    for w in range(0, len(table)):
        data=list(datarow[w])
        body = [wilayah[w], A2, B2, C2, D2, E2, F2, G2, H2, str(I2.date()), str(J2.date()), str(K2.date())]
        for rowbody2 in data:
            body.append(str(rowbody2[2]))
        for col_num, data in enumerate(body):
            worksheet.write(w+1, col_num, data, row2)
    # close the Excel file
    workbook.close()

# Driver script: define all the variables, call uploadToPSQL first, and only on
# success call makeChart (which in turn calls makeExcel and makePDF).
# base path used to create/read every file below
basePath = 'C:/Users/ASUS/Documents/bappenas/'
# region-similarity lookup file
filePathwilayah = basePath+'data mentah/CEIC/allwilayah.xlsx';
# read the Excel file with pandas
readexcelwilayah = pd.read_excel(filePathwilayah)
dfwilayah = list(readexcelwilayah.values)
# NOTE(review): fillna() is not in-place and its result is discarded here.
readexcelwilayah.fillna(0)
allwilayah = []
# choose the region level: province, kabupaten/kota, kecamatan or kelurahan
tipewilayah = 'prov'
if tipewilayah == 'prov':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][1])
elif tipewilayah=='kabkot':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][3])
elif tipewilayah == 'kec':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][5])
elif tipewilayah == 'kel':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][7])
# unique region names
semuawilayah = list(set(allwilayah))
# database settings and report metadata passed to the functions above
name = "03. Statistik Pendidikan (GAC001-GAC003)"
host = "localhost"
username = "postgres"
password = "1234567890"
port = "5432"
database = "ceic"
judul = "Produk Domestik Bruto (AA001-AA007)"
subjudul = "Badan Perencanaan Pembangunan Nasional"
filePath = basePath+'data mentah/CEIC/03. Sosio dan Demografi/'+name+'.xlsx';
limitdata = int(8)
readexcel = pd.read_excel(filePath)
tabledata = []
wilayah = []
databody = []
# read the source Excel with pandas
df = list(readexcel.values)
head = list(readexcel)
body = list(df[0])
# NOTE(review): fillna() result discarded here as well.
readexcel.fillna(0)
# pick the rows to process
rangeawal = 106
rangeakhir = 107
rowrange = range(rangeawal, rangeakhir)
# 'Wilayah' enables region-name similarity matching;
# any other value is used verbatim as the region label
jenisdata = "Indonesia"
# when matching regions, find the known region name most similar to the
# tail of the row label (Levenshtein similarity)
if jenisdata == 'Wilayah':
    for x in rowrange:
        rethasil = 0
        big_w = 0
        for w in range(0, len(semuawilayah)):
            namawilayah = semuawilayah[w].lower().strip()
            nama_wilayah_len = len(namawilayah)
            hasil = n0.get_levenshtein_similarity(df[x][0].lower().strip()[nama_wilayah_len*-1:], namawilayah)
            if hasil > rethasil:
                rethasil = hasil
                big_w = w
        wilayah.append(semuawilayah[big_w].capitalize())
        tabledata.append('produkdomestikbruto_'+semuawilayah[big_w].lower().replace(" ", "") + "" + str(x))
        testbody = []
        # replace NaN values with '0'
        # NOTE(review): `~np.isnan(...) == False` relies on numpy-bool `~`
        # acting as logical NOT; the branch is taken exactly when the value IS NaN.
        for listbody in df[x][11:]:
            if ~np.isnan(listbody) == False:
                testbody.append(str('0'))
            else:
                testbody.append(str(listbody))
        databody.append(testbody)
# otherwise use the fixed label for every row
else:
    for x in rowrange:
        wilayah.append(jenisdata.capitalize())
        tabledata.append('produkdomestikbruto_'+jenisdata.lower().replace(" ", "") + "" + str(x))
        testbody = []
        for listbody in df[x][11:]:
            if ~np.isnan(listbody) == False:
                testbody.append(str('0'))
            else:
                testbody.append(str(listbody))
        databody.append(testbody)
# header metadata for the PDF and Excel reports (columns 1-10 of the first row)
A2 = "Data Migas"
B2 = df[rangeawal][1]
C2 = df[rangeawal][2]
D2 = df[rangeawal][3]
E2 = df[rangeawal][4]
F2 = df[rangeawal][5]
G2 = df[rangeawal][6]
H2 = df[rangeawal][7]
I2 = df[rangeawal][8]
J2 = df[rangeawal][9]
K2 = df[rangeawal][10]
# date headers for the data table (columns 11 onward)
dataheader = []
for listhead in head[11:]:
    dataheader.append(str(listhead))
# upload to SQL; on success, build the charts and reports
sql = uploadToPSQL(host, username, password, database, port, tabledata, judul, filePath, name, subjudul, dataheader, databody)
if sql == True:
    makeChart(host, username, password, database, port, tabledata, judul, filePath, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, wilayah, tabledata, basePath)
else:
    print(sql)
```
github_jupyter
# Data Science with Python and Dask ## Chapter 5: Cleaning and Transforming DataFrames ``` # Before beginning, set your working directory to where the data resides import os os.chdir('/Users/jesse/Documents') ``` ### Intro Section ``` # Listing 5.1 import dask.dataframe as dd from dask.diagnostics import ProgressBar import numpy as np dtypes = { 'Date First Observed': np.str, 'Days Parking In Effect ': np.str, 'Double Parking Violation': np.str, 'Feet From Curb': np.float32, 'From Hours In Effect': np.str, 'House Number': np.str, 'Hydrant Violation': np.str, 'Intersecting Street': np.str, 'Issue Date': np.str, 'Issuer Code': np.float32, 'Issuer Command': np.str, 'Issuer Precinct': np.float32, 'Issuer Squad': np.str, 'Issuing Agency': np.str, 'Law Section': np.float32, 'Meter Number': np.str, 'No Standing or Stopping Violation': np.str, 'Plate ID': np.str, 'Plate Type': np.str, 'Registration State': np.str, 'Street Code1': np.uint32, 'Street Code2': np.uint32, 'Street Code3': np.uint32, 'Street Name': np.str, 'Sub Division': np.str, 'Summons Number': np.uint32, 'Time First Observed': np.str, 'To Hours In Effect': np.str, 'Unregistered Vehicle?': np.str, 'Vehicle Body Type': np.str, 'Vehicle Color': np.str, 'Vehicle Expiration Date': np.str, 'Vehicle Make': np.str, 'Vehicle Year': np.float32, 'Violation Code': np.uint16, 'Violation County': np.str, 'Violation Description': np.str, 'Violation In Front Of Or Opposite': np.str, 'Violation Legal Code': np.str, 'Violation Location': np.str, 'Violation Post Code': np.str, 'Violation Precinct': np.float32, 'Violation Time': np.str } nyc_data_raw = dd.read_csv('nyc-parking-tickets/*.csv', dtype=dtypes, usecols=dtypes.keys()) ``` ### Section 5.1.1 ``` # Listing 5.2 with ProgressBar(): display(nyc_data_raw['Plate ID'].head()) # Listing 5.3 with ProgressBar(): display(nyc_data_raw[['Plate ID', 'Registration State']].head()) # Listing 5.4 columns_to_select = ['Plate ID', 'Registration State'] with ProgressBar(): 
display(nyc_data_raw[columns_to_select].head()) ``` ### Section 5.1.2 ``` # Listing 5.5 with ProgressBar(): display(nyc_data_raw.drop('Violation Code', axis=1).head()) # Listing 5.6 violationColumnNames = list(filter(lambda columnName: 'Violation' in columnName, nyc_data_raw.columns)) with ProgressBar(): display(nyc_data_raw.drop(violationColumnNames, axis=1).head()) ``` ### Section 5.1.3 ``` # Listing 5.7 nyc_data_renamed = nyc_data_raw.rename(columns={'Plate ID':'License Plate'}) nyc_data_renamed ``` ### Section 5.1.4 ``` # Listing 5.8 with ProgressBar(): display(nyc_data_raw.loc[56].head(1)) # Listing 5.9 with ProgressBar(): display(nyc_data_raw.loc[100:200].head(100)) # Listing 5.10 with ProgressBar(): some_rows = nyc_data_raw.loc[100:200].head(100) some_rows.drop(range(100, 200, 2)) ``` ### Section 5.2.1 ``` # Listing 5.11 missing_values = nyc_data_raw.isnull().sum() with ProgressBar(): percent_missing = ((missing_values / nyc_data_raw.index.size) * 100).compute() percent_missing ``` ### Section 5.2.2 ``` # Listing 5.12 columns_to_drop = list(percent_missing[percent_missing >= 50].index) nyc_data_clean_stage1 = nyc_data_raw.drop(columns_to_drop, axis=1) ``` ### Section 5.2.3 ``` # Listing 5.13 with ProgressBar(): count_of_vehicle_colors = nyc_data_clean_stage1['Vehicle Color'].value_counts().compute() most_common_color = count_of_vehicle_colors.sort_values(ascending=False).index[0] # Fill missing vehicle color with the most common color nyc_data_clean_stage2 = nyc_data_clean_stage1.fillna({'Vehicle Color': most_common_color}) ``` ### Section 5.2.4 ``` # Listing 5.14 rows_to_drop = list(percent_missing[(percent_missing > 0) & (percent_missing < 5)].index) nyc_data_clean_stage3 = nyc_data_clean_stage2.dropna(subset=rows_to_drop) ``` ### Section 5.2.5 ``` # Listing 5.15 remaining_columns_to_clean = list(percent_missing[(percent_missing >= 5) & (percent_missing < 50)].index) nyc_data_raw.dtypes[remaining_columns_to_clean] # Listing 5.16 unknown_default_dict = 
dict(map(lambda columnName: (columnName, 'Unknown'), remaining_columns_to_clean)) # Listing 5.17 nyc_data_clean_stage4 = nyc_data_clean_stage3.fillna(unknown_default_dict) # Listing 5.18 with ProgressBar(): print(nyc_data_clean_stage4.isnull().sum().compute()) nyc_data_clean_stage4.persist() ``` ### Section 5.3 ``` # Listing 5.19 with ProgressBar(): license_plate_types = nyc_data_clean_stage4['Plate Type'].value_counts().compute() license_plate_types # Listing 5.20 condition = nyc_data_clean_stage4['Plate Type'].isin(['PAS', 'COM']) plate_type_masked = nyc_data_clean_stage4['Plate Type'].where(condition, 'Other') nyc_data_recode_stage1 = nyc_data_clean_stage4.drop('Plate Type', axis=1) nyc_data_recode_stage2 = nyc_data_recode_stage1.assign(PlateType=plate_type_masked) nyc_data_recode_stage3 = nyc_data_recode_stage2.rename(columns={'PlateType':'Plate Type'}) # Listing 5.21 with ProgressBar(): display(nyc_data_recode_stage3['Plate Type'].value_counts().compute()) # Listing 5.22 single_color = list(count_of_vehicle_colors[count_of_vehicle_colors == 1].index) condition = nyc_data_clean_stage4['Vehicle Color'].isin(single_color) vehicle_color_masked = nyc_data_clean_stage4['Vehicle Color'].mask(condition, 'Other') nyc_data_recode_stage4 = nyc_data_recode_stage3.drop('Vehicle Color', axis=1) nyc_data_recode_stage5 = nyc_data_recode_stage4.assign(VehicleColor=vehicle_color_masked) nyc_data_recode_stage6 = nyc_data_recode_stage5.rename(columns={'VehicleColor':'Vehicle Color'}) ``` ### Section 5.4 ``` # Listing 5.23 from datetime import datetime issue_date_parsed = nyc_data_recode_stage6['Issue Date'].apply(lambda x: datetime.strptime(x, "%m/%d/%Y"), meta=datetime) nyc_data_derived_stage1 = nyc_data_recode_stage6.drop('Issue Date', axis=1) nyc_data_derived_stage2 = nyc_data_derived_stage1.assign(IssueDate=issue_date_parsed) nyc_data_derived_stage3 = nyc_data_derived_stage2.rename(columns={'IssueDate':'Issue Date'}) # Listing 5.24 with ProgressBar(): 
display(nyc_data_derived_stage3['Issue Date'].head()) # Listing 5.25 issue_date_month_year = nyc_data_derived_stage3['Issue Date'].apply(lambda dt: dt.strftime("%Y%m"), meta=int) nyc_data_derived_stage4 = nyc_data_derived_stage3.assign(IssueMonthYear=issue_date_month_year) nyc_data_derived_stage5 = nyc_data_derived_stage4.rename(columns={'IssueMonthYear':'Citation Issued Month Year'}) # Listing 5.26 with ProgressBar(): display(nyc_data_derived_stage5['Citation Issued Month Year'].head()) ``` ### Section 5.5.1 ``` # Listing 5.27 months = ['201310','201410','201510','201610','201710'] condition = nyc_data_derived_stage5['Citation Issued Month Year'].isin(months) october_citations = nyc_data_derived_stage5[condition] with ProgressBar(): display(october_citations.head()) # Listing 5.28 bound_date = '2016-4-25' condition = nyc_data_derived_stage5['Issue Date'] > bound_date citations_after_bound = nyc_data_derived_stage5[condition] with ProgressBar(): display(citations_after_bound.head()) ``` ### Section 5.5.1 ``` # Listing 5.29 with ProgressBar(): condition = (nyc_data_derived_stage5['Issue Date'] > '2014-01-01') & (nyc_data_derived_stage5['Issue Date'] <= '2017-12-31') nyc_data_filtered = nyc_data_derived_stage5[condition] nyc_data_new_index = nyc_data_filtered.set_index('Citation Issued Month Year') # Listing 5.30 years = ['2014', '2015', '2016', '2017'] months = ['01','02','03','04','05','06','07','08','09','10','11','12'] divisions = [year + month for year in years for month in months] with ProgressBar(): nyc_data_new_index.repartition(divisions=divisions).to_parquet('nyc_data_date_index', compression='snappy') nyc_data_new_index = dd.read_parquet('nyc_data_date_index') ``` ### Section 5.6.1 ``` # Listing 5.31 import pandas as pd nyc_temps = pd.read_csv('nyc-temp-data.csv') nyc_temps_indexed = nyc_temps.set_index(nyc_temps.monthYear.astype(str)) nyc_data_with_temps = nyc_data_new_index.join(nyc_temps_indexed, how='inner') with ProgressBar(): 
display(nyc_data_with_temps.head(15)) ``` ### Section 5.6.2 ``` # Listing 5.32 fy16 = dd.read_csv('nyc-parking-tickets/Parking_Violations_Issued_-_Fiscal_Year_2016.csv', dtype=dtypes, usecols=dtypes.keys()) fy17 = dd.read_csv('nyc-parking-tickets/Parking_Violations_Issued_-_Fiscal_Year_2017.csv', dtype=dtypes, usecols=dtypes.keys()) fy1617 = fy16.append(fy17) with ProgressBar(): print(fy16['Summons Number'].count().compute()) with ProgressBar(): print(fy17['Summons Number'].count().compute()) with ProgressBar(): print(fy1617['Summons Number'].count().compute()) ``` ### Section 5.7.1 ``` # Listing 5.33 with ProgressBar(): if not os.path.exists('nyc-final-csv'): os.makedirs('nyc-final-csv') nyc_data_with_temps.repartition(npartitions=1).to_csv('nyc-final-csv/part*.csv') # Listing 5.33 with ProgressBar(): if not os.path.exists('nyc-final-csv-compressed'): os.makedirs('nyc-final-csv-compressed') nyc_data_with_temps.to_csv( filename='nyc-final-csv-compressed/*', compression='gzip', sep='|', na_rep='NULL', header=False, index=False) ``` ### Listing 5.7.2 ``` # Listing 5.35 with ProgressBar(): nyc_data_with_temps.to_parquet('nyc_final', compression='snappy') ```
github_jupyter
# Ipyleaflet with vaex ## Repository: https://github.com/vaexio/vaex ## Installation: `conda install -c conda-forge vaex` ``` import vaex import numpy as np np.warnings.filterwarnings('ignore') dstaxi = vaex.open('src/nyc_taxi2015.hdf5') # mmapped, doesn't cost extra memory dstaxi.plot_widget("pickup_longitude", "pickup_latitude", f="log", backend="ipyleaflet", shape=600) dstaxi.plot_widget("dropoff_longitude", "dropoff_latitude", f="log", backend="ipyleaflet", z="dropoff_hour", type="slice", z_shape=24, shape=400, z_relative=True, limits=[None, None, (-0.5, 23.5)]) ds = vaex.datasets.helmi_de_zeeuw.fetch() ds.plot_widget("x", "y", f="log", limits=[-20, 20]) ds.plot_widget("Lz", "E", f="log") ``` # ipyvolume * 3d plotting for Python in the Jupyter notebook based on IPython widgets using WebGL * Glyphs, volume rendering, surfaces/meshes/lines/isosurfaces * Live documentation http://ipyvolume.readthedocs.io/en/stable/ * Installation * `$ conda install -c conda-forge ipyvolume` * `$ pip install ipyvolume` ``` import ipyvolume as ipv import numpy as np np.warnings.filterwarnings('ignore') ipv.example_ylm(); N = 1000 x, y, z = np.random.random((3, N)) fig = ipv.figure() scatter = ipv.scatter(x, y, z, marker='box') ipv.show() scatter.x = scatter.x + 0.1 scatter.color = "green" scatter.size = 5 scatter.color = np.random.random((N,3)) scatter.size = 2 ipv.figure() ipv.style.use('dark') quiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5) ipv.animation_control(quiver, interval=200) ipv.show() ipv.style.use('light') quiver.size = np.random.random(quiver.x.shape) * 10 quiver.color = np.random.random(quiver.x.shape + (3,)) quiver.geo = "cat" # stereo quiver.geo = "arrow" N = 1000*1000 x, y, z = np.random.random((3, N)).astype('f4') ipv.figure() s = ipv.scatter(x, y, z, size=0.2) ipv.show() s.size = 0.1 #ipv.screenshot(width=2048, height=2048) plot3d = ds.plot_widget("x", "y", "z", vx="vx", vy="vy", vz="vz", backend="ipyvolume", f="log1p", 
shape=100, smooth_pre=0.5) plot3d.vcount_limits = [50, 100000] plot3d.backend.quiver.color = "red" import ipywidgets as widgets widgets.ColorPicker() widgets.jslink((plot3d.backend.quiver, 'color'), (_, 'value')) ipv.save("kapteyn-lunch-talk-2018.html") !open kapteyn-lunch-talk-2018.html # webrtc demo if time permits ``` # A Billion stars in the Jupyter notebook ``` import vaex #gaia = vaex.open("ws://gaia:9000/gaia-dr1") gaia = vaex.open('/Users/maartenbreddels/datasets/gaia/gaia-dr1-minimal_f4.hdf5') %matplotlib inline f"{len(gaia):,}" ra_dec_limits = [[0, 360], [-90, 90]] gaia.set_active_fraction(0.01) gaia.plot_widget("ra", "dec", limits=ra_dec_limits) gaia.mean("phot_g_mean_mag", selection=True) gaia.plot1d("phot_g_mean_mag", selection=False, n=True, limits=[10, 22]) gaia.plot1d("phot_g_mean_mag", selection=True, show=True, n=True, limits=[10, 22]) ```
github_jupyter
# Ch `10`: Concept `02` ## Recurrent Neural Network Import the relevant libraries: ``` import numpy as np import tensorflow as tf from tensorflow.contrib import rnn ``` Define the RNN model: ``` class SeriesPredictor: def __init__(self, input_dim, seq_size, hidden_dim=10): # Hyperparameters self.input_dim = input_dim self.seq_size = seq_size self.hidden_dim = hidden_dim # Weight variables and input placeholders self.W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out') self.b_out = tf.Variable(tf.random_normal([1]), name='b_out') self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim]) self.y = tf.placeholder(tf.float32, [None, seq_size]) # Cost optimizer self.cost = tf.reduce_mean(tf.square(self.model() - self.y)) self.train_op = tf.train.AdamOptimizer().minimize(self.cost) # Auxiliary ops self.saver = tf.train.Saver() def model(self): """ :param x: inputs of size [T, batch_size, input_size] :param W: matrix of fully-connected output layer weights :param b: vector of fully-connected output layer biases """ cell = rnn.BasicLSTMCell(self.hidden_dim) outputs, states = tf.nn.dynamic_rnn(cell, self.x, dtype=tf.float32) num_examples = tf.shape(self.x)[0] W_repeated = tf.tile(tf.expand_dims(self.W_out, 0), [num_examples, 1, 1]) out = tf.matmul(outputs, W_repeated) + self.b_out out = tf.squeeze(out) return out def train(self, train_x, train_y): with tf.Session() as sess: tf.get_variable_scope().reuse_variables() sess.run(tf.global_variables_initializer()) for i in range(1000): _, mse = sess.run([self.train_op, self.cost], feed_dict={self.x: train_x, self.y: train_y}) if i % 100 == 0: print(i, mse) save_path = self.saver.save(sess, 'model.ckpt') print('Model saved to {}'.format(save_path)) def test(self, test_x): with tf.Session() as sess: tf.get_variable_scope().reuse_variables() self.saver.restore(sess, './model.ckpt') output = sess.run(self.model(), feed_dict={self.x: test_x}) return output ``` Now, we'll train a series predictor. 
Let's say we have a sequence of numbers `[a, b, c, d]` that we want to transform into `[a, a+b, b+c, c+d]`. We'll give the RNN a couple examples in the training data. Let's see how well it learns this intended transformation: ``` if __name__ == '__main__': predictor = SeriesPredictor(input_dim=1, seq_size=4, hidden_dim=10) train_x = [[[1], [2], [5], [6]], [[5], [7], [7], [8]], [[3], [4], [5], [7]]] train_y = [[1, 3, 7, 11], [5, 12, 14, 15], [3, 7, 9, 12]] predictor.train(train_x, train_y) test_x = [[[1], [2], [3], [4]], # 1, 3, 5, 7 [[4], [5], [6], [7]]] # 4, 9, 11, 13 actual_y = [[[1], [3], [5], [7]], [[4], [9], [11], [13]]] pred_y = predictor.test(test_x) print("\nLets run some tests!\n") for i, x in enumerate(test_x): print("When the input is {}".format(x)) print("The ground truth output should be {}".format(actual_y[i])) print("And the model thinks it is {}\n".format(pred_y[i])) tested; Gopal ```
github_jupyter
<h1>Today Lesson Outline (28 June 2020) </h1> ## Tuples က ဘာလဲ။ ဘယ်လိုတည်ဆောက်သလဲ။ Tuples ဆိုတာ အစီအစဥ်အတိုင်းစီထားသော ကိန်းစဥ်တစ်မျိုးပါ။ Tuples တွေကို ( ၀ိုက်ကွင်း ) တွေထဲမှာ , ခြားပြီး သတ်မှတ်နိုင်ပါတယ်။ ``` Ratings = (10,9,6,5,10,8,9,6,2) ``` Tuples က Data types တွေကို ရောပြီးသိမ်းထားနိုင်တယ်။ ``` cars = ('Honda', 'Civic', 90) ``` Tuples ထဲက elements တွေကို index number နဲ့ထုတ်လို့ရတယ်။ ``` print(cars[0]) print(cars[-3]) print(cars[1]) print(cars[-2]) print(cars[2]) print(cars[-1]) ``` ## Tuples Operations ### Combination ``` cars = ('Honda', 'Civic', 90) planes = ('Jets', 'F11', 70) vehicles = cars + planes print(vehicles) ``` ### Slicing ``` vehicles[0:3] ``` ### Element အရေအတွက်ကို ကြည့်မယ်။ ``` len(vehicles) ``` ## Tuples ရဲ့ ထူးခြားချက် Tuples က elements တွေကို အခြား data structure တွေလို ပြုပြင်လို့မရပါဘူး။ ``` vehicles[3] = “Borings” ``` ## Tuples : Nesting ``` NT = (1,2,("pop","rock"),(3,4),("disco",(1,2))) NT[2] NT[2][0] NT[2][1] NT[2][1][0] NT[2][1][1] NT[3] NT[3][0] NT[3][1] NT[4] NT[4][0] NT[4][1] NT[4][1][0] NT[4][1][1] ``` <h2 id="set">Sets</h2> <h3 id="content">Set က ဘာကောင်လဲ။</h3> Set ဆိုတာ အစုအဖွဲ့ (collection) ပုံစံတစ်မျိုးပါ။ အစီအစဥ်မကျပါဘူး။ အထဲမှာပါတဲ့ဒေတာမထပ်ပါဘူး။ ### Set ကို ဖန်တီးကြည့်ကြရအောင်။ ``` # Create a set set1 = {"pop", "rock", "soul", "hard rock", "rock", "R&B", "rock", "disco"} set1 ``` The process of mapping is illustrated in the figure: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsUnique.png" width="1100" /> You can also create a set from a list as follows: ``` # Convert list to set album_list = [ "Michael Jackson", "Thriller", 1982, "00:42:19", \ "Pop, Rock, R&B", 46.0, 65, "30-Nov-82", None, 10.0] album_set = set(album_list) album_set ``` Now let us create a set of genres: ``` # Convert list to set music_genres = set(["pop", "pop", "rock", "folk rock", "hard rock", "soul", \ "progressive rock", "soft rock", "R&B", "disco"]) music_genres ``` <h3 
id="op">Set Operations</h3> Let us go over set operations, as these can be used to change the set. Consider the set <b>A</b>: ``` # Sample set A = set(["Thriller", "Back in Black", "AC/DC"]) A ``` We can add an element to a set using the <code>add()</code> method: ``` # Add element to set A.add("NSYNC") A ``` If we add the same element twice, nothing will happen as there can be no duplicates in a set: ``` # Try to add duplicate element to the set A.add("NSYNC") A ``` We can remove an item from a set using the <code>remove</code> method: ``` # Remove the element from set A.remove("NSYNC") A ``` We can verify if an element is in the set using the <code>in</code> command: ``` # Verify if the element is in the set "AC/DC" in A ``` <h3 id="logic">Sets Logic Operations</h3> Consider the following two sets: ``` # Sample Sets album_set1 = set(["Thriller", 'AC/DC', 'Back in Black']) album_set2 = set([ "AC/DC", "Back in Black", "The Dark Side of the Moon"]) ``` <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsSamples.png" width="650" /> ``` # Print two sets album_set1, album_set2 ``` As both sets contain <b>AC/DC</b> and <b>Back in Black</b> we represent these common elements with the intersection of two circles. <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsLogic.png" width = "650" /> You can find the intersect of two sets as follow using <code>&</code>: ``` # Find the intersections intersection = album_set1 & album_set2 intersection ``` You can find all the elements that are only contained in <code>album_set1</code> using the <code>difference</code> method: ``` # Find the difference in set1 but not set2 album_set1.difference(album_set2) ``` You only need to consider elements in <code>album_set1</code>; all the elements in <code>album_set2</code>, including the intersection, are not included. 
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsLeft.png" width="650" /> The elements in <code>album_set2</code> but not in <code>album_set1</code> is given by: ``` album_set2.difference(album_set1) ``` <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsRight.png" width="650" /> You can also find the intersection of <code>album_list1</code> and <code>album_list2</code>, using the <code>intersection</code> method: ``` # Use intersection method to find the intersection of album_list1 and album_list2 album_set1.intersection(album_set2) ``` This corresponds to the intersection of the two circles: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsIntersect.png" width="650" /> The union corresponds to all the elements in both sets, which is represented by coloring both circles: <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/SetsUnion.png" width="650" /> The union is given by: ``` # Find the union of two sets album_set1.union(album_set2) ``` And you can check if a set is a superset or subset of another set, respectively, like this: ``` # Check if superset set(album_set1).issuperset(album_set2) # Check if subset set(album_set2).issubset(album_set1) ``` Here is an example where <code>issubset()</code> and <code>issuperset()</code> return true: ``` # Check if subset set({"Back in Black", "AC/DC"}).issubset(album_set1) # Check if superset album_set1.issuperset({"Back in Black", "AC/DC"}) ``` <hr>
github_jupyter
``` import os os.environ['TRKXINPUTDIR']="/global/cfs/projectdirs/atlas/xju/heptrkx/trackml_inputs/train_all" os.environ['TRKXOUTPUTDIR']= "/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/outtest" import pkg_resources import yaml import pprint import random import time import pickle random.seed(1234) import numpy as np import pandas as pd import itertools import matplotlib.pyplot as plt from tqdm import tqdm from os import listdir from os.path import isfile, join import matplotlib.cm as cm import sys import warnings warnings.filterwarnings('ignore') from os import listdir from os.path import isfile, join import gc # %matplotlib widget sys.path.append('/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/') # 3rd party import torch import torch.nn.functional as F from torch_geometric.data import Data from trackml.dataset import load_event from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint # local import from exatrkx import config_dict # for accessing predefined configuration files from exatrkx import outdir_dict # for accessing predefined output directories from exatrkx.src import utils_dir from exatrkx.src import utils_robust from utils_robust import * # for preprocessing from exatrkx import FeatureStore from exatrkx.src import utils_torch # for embedding from exatrkx import LayerlessEmbedding from exatrkx.src import utils_torch from torch_cluster import radius_graph from utils_torch import build_edges from embedding.embedding_base import * # for filtering from exatrkx import VanillaFilter # for GNN import tensorflow as tf from graph_nets import utils_tf from exatrkx import SegmentClassifier import sonnet as snt # for labeling from exatrkx.scripts.tracks_from_gnn import prepare as prepare_labeling from exatrkx.scripts.tracks_from_gnn import clustering as dbscan_clustering # track efficiency from trackml.score import _analyze_tracks from exatrkx.scripts.eval_reco_trkx import make_cmp_plot, pt_configs, eta_configs from 
functools import partial noise_keep = ["0","0.2", "0.4", "0.6", "0.8", "1"] embed_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/embedding/checkpoints/epoch=10.ckpt' filter_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/filtering/checkpoints/epoch=92.ckpt' gnn_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/gnn' plots_dir = '/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/plots/run1000' # needs to change... ckpt_idx = -1 # which GNN checkpoint to load dbscan_epsilon, dbscan_minsamples = 0.25, 2 # hyperparameters for DBScan min_hits = 5 # minimum number of hits associated with a particle to define "reconstructable particles" frac_reco_matched, frac_truth_matched = 0.5, 0.5 # parameters for track matching torch.cuda.is_available() def calc_evts(data_n,rval=1.7,kval=500): matched_idx = [] peta = [] par_pt = [] total_times = [] build_edges = [] build_graphs = [] predict_times = [] filter_times = [] doub_pur = [] doub_eff = [] for data in tqdm(data_n): ############################################# # EMBEDDING # ############################################# device = 'cuda' if torch.cuda.is_available() else 'cpu' e_ckpt = torch.load(embed_ckpt_dir, map_location=device) e_config = e_ckpt['hyper_parameters'] e_config['clustering'] = 'build_edges' e_config['knn_val'] = kval e_config['r_val'] = rval e_model = LayerlessEmbedding(e_config).to(device) e_model.load_state_dict(e_ckpt["state_dict"]) e_model.eval() with torch.no_grad(): # had to move everything to device spatial = e_model(torch.cat([data.cell_data.to(device), data.x.to(device)], axis=-1)) #total_start = time.time() ############################################# # BUILD EDGES # ############################################# edges_start = time.time() e_spatial = utils_torch.build_edges(spatial.to(device), e_model.hparams['r_val'], e_model.hparams['knn_val']) edges_end = time.time() R_dist = torch.sqrt(data.x[:,0]**2 + data.x[:,2]**2) # distance away from origin... 
e_spatial = e_spatial[:, (R_dist[e_spatial[0]] <= R_dist[e_spatial[1]])] ############################################# # DOUBLET METRICS # ############################################# e_bidir = torch.cat([data.layerless_true_edges,torch.stack([data.layerless_true_edges[1], data.layerless_true_edges[0]], axis=1).T], axis=-1) # did not have to convert e_spatail to tensor?? e_spatial_n, y_cluster = graph_intersection(e_spatial, e_bidir) cluster_true = len(data.layerless_true_edges[0]) cluster_true_positive = y_cluster.sum() cluster_positive = len(e_spatial_n[0]) pur = cluster_true_positive/cluster_positive eff = cluster_true_positive/cluster_true ############################################# # FILTER # ############################################# f_ckpt = torch.load(filter_ckpt_dir, map_location='cpu') f_config = f_ckpt['hyper_parameters'] f_config['train_split'] = [0, 0, 1] f_config['filter_cut'] = 0.18 f_model = VanillaFilter(f_config).to(device) f_model.load_state_dict(f_ckpt['state_dict']) f_model.eval() filter_start = time.time() emb = None # embedding information was not used in the filtering stage. chunks = 10 output_list = [] for j in range(chunks): subset_ind = torch.chunk(torch.arange(e_spatial.shape[1]), chunks)[j] with torch.no_grad(): output = f_model(torch.cat([data.cell_data.to(device), data.x.to(device)], axis=-1), e_spatial[:, subset_ind], emb).squeeze() #.to(device) output_list.append(output) del subset_ind del output gc.collect() output = torch.cat(output_list) output = torch.sigmoid(output) # The filtering network assigns a score to each edge. # In the end, edges with socres > `filter_cut` are selected to construct graphs. 
# edge_list = e_spatial[:, output.to('cpu') > f_model.hparams['filter_cut']] edge_list = e_spatial[:, output > f_model.hparams['filter_cut']] filter_end = time.time() ############################################# # BUILD GRAPH # ############################################# # ### Form a graph # Now moving TensorFlow for GNN inference. n_nodes = data.x.shape[0] n_edges = edge_list.shape[1] nodes = data.x.cpu().numpy().astype(np.float32) edges = np.zeros((n_edges, 1), dtype=np.float32) senders = edge_list[0].cpu() receivers = edge_list[1].cpu() input_datadict = { "n_node": n_nodes, "n_edge": n_edges, "nodes": nodes, "edges": edges, "senders": senders, "receivers": receivers, "globals": np.array([n_nodes], dtype=np.float32) } input_graph = utils_tf.data_dicts_to_graphs_tuple([input_datadict]) num_processing_steps_tr = 8 optimizer = snt.optimizers.Adam(0.001) model = SegmentClassifier() output_dir = gnn_ckpt_dir checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=output_dir, max_to_keep=10) status = checkpoint.restore(ckpt_manager.checkpoints[ckpt_idx]).expect_partial() # clean up GPU memory del e_spatial del e_model del f_model gc.collect() if device == 'cuda': torch.cuda.empty_cache() graph_start = time.time() outputs_gnn = model(input_graph, num_processing_steps_tr) output_graph = outputs_gnn[-1] graph_end = time.time() ############################################# # TRACK LABELLING # ############################################# predict_start = time.time() input_matrix = prepare_labeling(tf.squeeze(output_graph.edges).cpu().numpy(), senders, receivers, n_nodes) predict_tracks = dbscan_clustering(data.hid.cpu(), input_matrix, dbscan_epsilon, dbscan_minsamples) # trkx_groups = predict_track_df.groupby(['track_id']) # all_trk_ids = np.unique(predict_track_df.track_id) # n_trkxs = all_trk_ids.shape[0] # predict_tracks = [trkx_groups.get_group(all_trk_ids[idx])['hit_id'].to_numpy().tolist() 
for idx in range(n_trkxs)] predict_end = time.time() ############################################# # END-TO-END METRICS # ############################################# evt_path = data.event_file m_idx, pt, p_pt = track_eff(evt_path, predict_tracks,min_hits,frac_reco_matched, frac_truth_matched) #total_end = time.time() ############################################# # SAVE TO LIST # ############################################# #total_times.append(total_end-total_start) build_edges.append(edges_end-edges_start) predict_times.append(predict_end-predict_start) filter_times.append(filter_end-filter_start) build_graphs.append(graph_end-graph_start) matched_idx.append(m_idx) peta.append(pt) par_pt.append(p_pt) doub_pur.append(pur) doub_eff.append(eff) this_dict = { 'matched_idx' : matched_idx, 'peta' : peta, 'par_pt' : par_pt, 'doublet_purity' : doub_pur, 'doublet_efficiency' : doub_eff, #'total_times' : total_times, 'build_edges' : build_edges, 'build_graphs' : build_graphs, 'filter_times' : filter_times, 'predict_times' : predict_times } return this_dict def create_pickle(data_n,rval=1.7,kval=500): dictn = calc_evts(data_n,rval,kval) with open(f'/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/val/lists_n{rval}.pickle', 'wb') as handle: pickle.dump(dictn, handle) def get_data_np(mypath): onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))][:20] data_n = [] for file in onlyfiles: data = torch.load(join(mypath,file)) data_n.append(data) return data_n def open_pickle(pickle_dir): with open(pickle_dir, 'rb') as handle: unpickler = pickle.Unpickler(handle) b = unpickler.load() return b r_vals = np.arange(1,2,0.1) knn_vals = np.arange(400,501,10) noise_dir = f'/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/layerless_check/n1/feature_store' data_n = get_data_np(noise_dir) # for r in r_vals: # print(f"------ r_val {r}------") # create_pickle(data_n,rval=r) # print("--------------------") create_pickle(data_n,rval=2.0) plt.rcParams.update({'axes.titlesize' : 
16, 'axes.labelsize' : 16, 'lines.linewidth' : 2, 'lines.markersize' : 10, 'xtick.labelsize' : 14, 'xtick.major.width' : 2, 'ytick.labelsize' : 14, 'ytick.major.width' : 2, 'grid.alpha' : 0.5, "legend.frameon" : False, 'legend.fontsize' : 16}) def get_pickle_data(pickle_path,n): list_data = {} list_data[f'{n}'] = open_pickle(pickle_path) d = list_data[f'{n}'] p = d['doublet_purity'] p = np.mean(p) e = d['doublet_efficiency'] e = np.mean(e) b = d['build_edges'] b = np.mean(b) g = d['build_graphs'] g = np.mean(g) return p,e,b,g pur = [] eff = [] be = [] gt = [] for r in r_vals: pickle_path = f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/val/lists_n{r}.pickle" p,e,b,g = get_pickle_data(pickle_path,r) pur.append(p) eff.append(e) be.append(b) gt.append(g) import pandas as pd from pandas import DataFrame as df d = {'r_val': r_vals, 'Purity' : pur, 'Efficiency' : eff, 'Build Edges (s)': be, 'Build Graphs (s)' : gt} table = df(data=d) table fig,ax = plt.subplots(2,2,figsize = (15,15)) ax[0][0].plot(r_vals,pur) ax[0][0].set_xlabel('r_val') ax[0][0].set_ylabel('Purity') ax[0][0].set_xticks(r_vals) ax[0][1].plot(r_vals,eff) ax[0][1].set_xlabel('r_val') ax[0][1].set_ylabel('Efficiency') ax[0][1].set_xticks(r_vals) ax[1][0].plot(r_vals,be) ax[1][0].set_xlabel('r_val') ax[1][0].set_ylabel('Build Edges Wall Time (s)') ax[1][0].set_xticks(r_vals) ax[1][1].plot(r_vals,gt) ax[1][1].set_xlabel('r_val') ax[1][1].set_ylabel('Build Graphs Wall Time (s)') ax[1][1].set_xticks(r_vals) fig.suptitle("Change in r_val at 20% noise") ```
github_jupyter
# Internal Datastructure: Bus branch model, Admittance and Jacobian Matrix This jupyter notebooks explains how to access and interpret the internal datastructure with relevant matrices. ### Internal Datastructure We use the simple example network from the create_simple tutorial as an example for how to access internal calculation parameters: <img src="pics/example_network_simple.png"> ``` import pandapower as pp import pandapower.networks as nw net = nw.example_simple() print(net) ``` First, we run a power flow in this network: ``` pp.runpp(net) ``` When a power flow is carried out, the element based grid model is translated into a bus-branch model. That bus-branch model is stored in a data structure that is based on the PYPOWER/MATPOWER casefile (with some extensions). This ppc can be accesed after power flow: ``` net._ppc ``` For information on how this datastructure is defined, please refer to the MATPOWER documentation. **Note:** For linear power flow (DC load flow) 'Ybus' is no longer created, but 'Bbus' as a new 'internal' key. ``` pp.rundcpp(net) net._ppc['internal']['Bbus'].A ``` ## Nodal Point Admittance Matrix The nodal point admittance matrix is saved in the ppc and can be accessed directly: ``` pp.runpp(net) net._ppc["internal"]["Ybus"].todense() ``` Note that the nodal point admittance matrix is given in per unit values. ## Jacobian Matrix The jacobian Matrix J in the last iteration step is also stored in the ppc and can be accessed: ``` net._ppc["internal"]["J"].todense() ``` The jacobian matrix is also given in per unit values. ## Mapping the Buses The pandapower indices are not equal to the ppc indices for several reasons. Some buses are fused together in case of closed bus-bus switches and auxiliary buses are created for elements like extended wards or three winding transformers. 
See here for more details: https://pandapower.readthedocs.io/en/latest/elements/switch.html There is however a mapping between pandapower indices and ppc indices that is created during the conversion to keep track of the dependencies that is also stored in the net: ``` net._pd2ppc_lookups["bus"] ``` To get a ppc index from the pandapower index, simply call the lookup like this: ``` pandapower_bus_idx = 3 ppc_index = net._pd2ppc_lookups["bus"][pandapower_bus_idx] print(ppc_index) ``` As you can see, pandapower bus index 3 corresponds to ppc bus index 2. So if we would like to find the diagonal entry of the Ybus matrix for bus 2, we could now access it with that internal index: ``` Ybus = net._ppc["internal"]["Ybus"] int_idx = net._pd2ppc_lookups["bus"][ppc_index] Ybus[int_idx, int_idx] ``` We can also see that some buses are mapped to the same internal bus, such as bus 1 and bus 2: ``` print(net._pd2ppc_lookups["bus"][1]) print(net._pd2ppc_lookups["bus"][2]) ``` That is because buses 1 and 2 are connected by a closed bus-bus switch and are therefore represented internally as the same bus: ``` net.switch.loc[0] ``` ## The pandapower indices are not equal to the ppc indices for several reasons. Some buses are fused together in case of closed bus-bus switches and auxiliary buses are created for elements like extended wards or three winding transformers. There is however a mapping between pandapower indices and ppc indices that is created during the conversion to keep track of the dependencies that is also stored in the net: ## Obtaining Jacobian Entries of Generators As an example we show how to obtain the Jacobian entries of generator buses using the pandapower -> ppc bus mapping. 
First we get buses of the in-service generators: ``` gen_buses = net.gen.loc[net.gen.in_service.values, "bus"].values print(f"pandapower gen bus: {gen_buses}") ``` Second, we get the Jacobian matrix: ``` J = net._ppc["internal"]["J"] print(f"Jacobian shape: {J.shape}") ``` Why does the Jacobian have the shape 9x9? It consists of the partial derivatives J11 = dP_dVa, J12 = dP_Vm, J21 = dQ_dVa, J22 = dQ_dVm. Except the reference bus, all PV- and PQ-buses are included in J. Vm is constant for PV nodes and dS/dVm is 0 for PV-buses (gens in pandapower) and Q is a variable. In our case we have 1 reference bus (at bus 0), 1 gen at bus 5, and 3 pq buses (at buses 1, 2, 4). This is the reason why J11 to J22 have these shapes: J11 = pvpq x pvpq (dP_dVa) J12 = pvpq x pq (dP_dVm) J21 = pq x pvpq (dQ_dVa) J22 = pq x pq (dQ_dVm) Only J11 contains values relevant for gens. ``` bus_lookup = ppc_index = net._pd2ppc_lookups["bus"] print(f"pandapower to ppc lookup: {bus_lookup}") ppc_gen_buses = bus_lookup[gen_buses] print(f"pandapower gen bus: {gen_buses} maps to ppc gen bus: {ppc_gen_buses}") ``` Now, we need the pv and pq entries in J to obtain the Jacobian sub-matrices: ``` import numpy as np # get pv and pq values from newtonpf() pv = net._ppc["internal"]["pv"] pq = net._ppc["internal"]["pq"] # stack these as done in newtonpf() pvpq = np.hstack((pv, pq)) print("pv and pq nodes as in the newtonpf() function") print(f"pv buses: {pv}\npq buses: {pq}\npvpq buses: {pvpq}") # get len of pv and pq n_pvpq = len(pvpq) n_pq = len(pq) n_pv = len(pv) # get J11, J12, J21, and J22 j11 = J[:n_pvpq, :n_pvpq] j12 = J[:n_pvpq, n_pvpq:] j21 = J[n_pvpq:, :n_pvpq] j22 = J[n_pvpq:, n_pvpq:] print("shape of J sub-matrices:") print(f"j11 = {j11.shape}") print(f"j12 = {j12.shape}") print(f"j21 = {j21.shape}") print(f"j22 = {j22.shape}") ``` Now, we finally get the generator entries in J: ``` # j11 gen entries m = np.isin(pvpq, pv) n = m j11_gen_entries = j11[m, n] print(f"J11 indices: m = {m}, n = {n}") 
print(f"pandapower gen {gen_buses} entries (ppc PV nodes {ppc_gen_buses}) in J11 (=dP/dVa): {j11_gen_entries}") ```
github_jupyter
``` ## Done from google.colab import drive; drive.mount("/content/drive") import pandas as pd import numpy as np import nltk import re nltk.download("stopwords");nltk.download("punkt") import sklearn data1 = pd.read_excel("/content/drive/My Drive/DimasASu/Data Latih BDC.xlsx") data2 = pd.read_csv("/content/drive/My Drive/DimasASu/data_clean.csv") dataTest = pd.read_csv("/content/drive/My Drive/DimasASu/datatest_labelled.csv") data1.head() , data2.head() data1 = data1[["judul","narasi","label"]] data2 = data2[["judul","narasi","label"]] df_train = pd.concat([data1,data2]) df_train = df_train.reset_index().drop("index",axis = 1) df_train.tail() def clean(text_messages): text_messages = str(text_messages) processed = re.sub(r'^.+@[^\.].*\.[a-z]{2,}$', 'alamat email',text_messages) # Replace URLs with 'webaddress' processed = re.sub(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$', 'alamat web',processed) # Replace money symbols with 'moneysymb' (£ can by typed with ALT key + 156) processed = re.sub(r'£|\$', 'moneysymb',processed) # Replace 10 digit phone numbers (formats include paranthesis, spaces, no spaces, dashes) with 'phonenumber' processed = re.sub(r'^\(?[\d]{3}\)?[\s-]?[\d]{3}[\s-]?[\d]{4}$', 'nomor hp',processed) # Replace numbers with 'numbr' processed = re.sub(r'\d+(\.\d+)?', 'numbr',processed) # Remove punctuation processed = re.sub(r'[^\w\d\s]', ' ',processed) # Replace whitespace between terms with a single space processed = re.sub(r'\s+', ' ',processed) # Remove leading and trailing whitespace processed = re.sub(r'^\s+|\s+?$', '',processed) processed = processed.lower() return processed df_train["narasi"] = df_train["narasi"].apply(clean) df_train["judul"] = df_train["judul"].apply(clean) from nltk.corpus import stopwords stopwordsObj = set(stopwords.words("indonesian")) df_train["narasi"] = df_train["narasi"].apply(lambda sentence: " ".join(word for word in sentence.split(" ") if word not in stopwordsObj)) df_train["judul"] = 
df_train["judul"].apply(lambda sentence: " ".join(word for word in sentence.split(" ") if word not in stopwordsObj)) porterStemmer = nltk.PorterStemmer() df_train["narasi"] = df_train["narasi"].apply(lambda sentence: " ".join(porterStemmer.stem(word) for word in sentence.split(" ")) ) df_train["narasi"].head() from nltk.tokenize import word_tokenize corpus = [] for message in df_train["narasi"]: words = word_tokenize(message) for word in words: corpus.append(word) for message in df_train["judul"]: words = word_tokenize(message) for word in words: corpus.append(word) corpus = nltk.FreqDist(corpus) corpus word_features = list(corpus.keys())[:5000] def find_features(message): message = str(message) words = word_tokenize(message) features = {} for word in word_features: features[word] = word in words return features messages = list(zip(df_train["narasi"],df_train["label"])) np.random.seed(1) print(messages) featuresets = [(find_features(text),label) for (text,label) in messages] featuresets[:5] from sklearn.model_selection import train_test_split training,testing = train_test_split(featuresets,test_size = 0.05,random_state = 1) print(len(training), len(testing)) from nltk import SklearnClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC from sklearn.metrics import classification_report, accuracy_score, confusion_matrix # Define models to train names = ["K Nearest Neighbors", "Decision Tree", "Random Forest", "Logistic Regression", "SGD Classifier", "Naive Bayes", "SVM Linear"] classifiers = [ KNeighborsClassifier(), DecisionTreeClassifier(), RandomForestClassifier(), LogisticRegression(), SGDClassifier(max_iter = 100), MultinomialNB(), SVC(kernel = 'linear') ] models = zip(names, classifiers) for name, model in models: 
nltk_model = SklearnClassifier(model) nltk_model.train(training) accuracy = nltk.classify.accuracy(nltk_model, testing)*100 print("{} Accuracy: {}".format(name, accuracy)) from sklearn.ensemble import VotingClassifier names = ["K Nearest Neighbors", "Decision Tree", "Random Forest", "Logistic Regression", "SGD Classifier", "Naive Bayes", "SVM Linear"] classifiers = [ KNeighborsClassifier(), DecisionTreeClassifier(), RandomForestClassifier(), LogisticRegression(), SGDClassifier(max_iter = 100), MultinomialNB(), SVC(kernel = 'linear') ] models = list(zip(names, classifiers)) nltk_ensemble = SklearnClassifier(VotingClassifier(estimators = models, voting = 'hard', n_jobs = -1)) nltk_ensemble.train(training) accuracy = nltk.classify.accuracy(nltk_ensemble, testing)*100 print("Voting Classifier: Accuracy: {}".format(accuracy)) ```
github_jupyter
# Demo This notebook demonstrates the basic functionality of the `perfectns` package; for background see the dynamic nested sampling paper [(Higson at al., 2019a)](https://doi.org/10.1007/s11222-018-9844-0). ### Running nested sampling calculations The likelihood $\mathcal{L}(\theta)$, prior $\pi(\theta)$ and calculation settings are specified in a PerfectNSSettings object. For this example we will use a 10-dimensional spherically symmetric Gaussian likelihood with size $\sigma_\mathcal{L}=1$ and a Gaussian prior with size $\sigma_{\pi}=10$. ``` import perfectns.settings import perfectns.likelihoods as likelihoods import perfectns.priors as priors # Input settings settings = perfectns.settings.PerfectNSSettings() settings.likelihood = likelihoods.Gaussian(likelihood_scale=1) settings.prior = priors.Gaussian(prior_scale=10) settings.n_dim = 10 settings.ninit = 10 settings.nlive_const = 100 ``` The "dynamic_goal" setting determines if dynamic nested sampling should be used and, if so, how to split the computational effort between increasing parameter estimation accuracy and evidence calculation accuracy. dynamic_goal=1 optimises purely for parameter estimation and dynamic_goal=0 optimises purely for calculating the Bayesian evidence $\mathcal{Z}$. Lets try running standard nested sampling and dynamic nested sampling calculation: ``` import perfectns.nested_sampling as nested_sampling # Perform standard nested sampling settings.dynamic_goal = None standard_ns_run = nested_sampling.generate_ns_run(settings, random_seed=0) # set random_seed for reproducible results # Perform dynamic nested sampling settings.dynamic_goal = 1 # optimise for parameter estimation accuracy dynamic_ns_run = nested_sampling.generate_ns_run(settings, random_seed=0) # set random_seed for reproducible results ``` We can now make posterior inferences using the samples generated by the nested sampling calculations using the utility functions from ``nestcheck``. Here we calculate: 1\. 
the log Bayesian evidence $\log \mathcal{Z}=\log \left( \int \mathcal{L}(\theta) \pi(\theta) \mathrm{d}\theta \right)$, 2\. the mean of the first parameter $\theta_1$, 3\. the second moment of the posterior distribution of $\theta_1$, 4\. the median of $\theta_1$, 5\. the 84% one-tailed credible interval on $\theta_1$. For the Gaussian likelihood and prior we can calculate the posterior distribution analytically, so we first calculate the analytic values of each quantity for comparison. The results are displayed in a `pandas` DataFrame. ``` import perfectns.estimators as e import nestcheck.ns_run_utils import pandas as pd estimator_list = [e.LogZ(), e.ParamMean(), e.ParamSquaredMean(), e.ParamCred(0.5), e.ParamCred(0.84)] estimator_names = [est.latex_name for est in estimator_list] results = pd.DataFrame([nestcheck.ns_run_utils.run_estimators(standard_ns_run, estimator_list), nestcheck.ns_run_utils.run_estimators(dynamic_ns_run, estimator_list)], columns=estimator_names, index=['standard run', 'dynamic run']) # Add true values for comparison results.loc['true values'] = e.get_true_estimator_values(estimator_list, settings) results ``` ### Estimating sampling errors You can estimate the numerical uncertainties on these results by calculating the standard deviation of the sampling errors distributions each run using the bootstrap resampling approach described in [Higson et al. (2018)](https://doi.org/10.1214/17-BA1075) (implemented in `nestcheck`). ``` import numpy as np import nestcheck.error_analysis np.random.seed(0) results.loc['standard unc'] = nestcheck.error_analysis.run_std_bootstrap( standard_ns_run, estimator_list, n_simulate=200) results.loc['dynamic unc'] = nestcheck.error_analysis.run_std_bootstrap( dynamic_ns_run, estimator_list, n_simulate=200) results.loc[['standard unc', 'dynamic unc']] ``` This approach works for both dynamic and standard nested sampling. 
In addition as `perfectns` can perform the nested sampling algorithm "perfectly" there are no additional errors from implementation-specific effects such as correlated samples (see [Higson et al., 2019b](http://doi.org/10.1093/mnras/sty3090) for a detailed discussion). ### Generating and analysing runs in parallel Multiple nested sampling runs can be generated and analysed in parallel (using `parallel_utils` from `nestcheck`). ``` import numpy as np import nestcheck.parallel_utils as pu import nestcheck.pandas_functions as pf # Generate 100 nested sampling runs run_list = nested_sampling.get_run_data(settings, 100, save=False, load=False, random_seeds=list(range(100))) # Calculate posterior inferences for each run values = pu.parallel_apply(nestcheck.ns_run_utils.run_estimators, run_list, func_args=(estimator_list,)) # Show the mean and standard deviation of the calculation results multi_run_tests = pf.summary_df_from_list(values, estimator_names) multi_run_tests ``` ### Comparing dynamic and standard nested sampling performance Lets now compare the performance of dynamic and standard nested sampling, using the 10-dimensional Gaussian likelihood and prior. ``` import perfectns.results_tables as rt # Input settings settings = perfectns.settings.PerfectNSSettings() settings.likelihood = likelihoods.Gaussian(likelihood_scale=1) settings.prior = priors.Gaussian(prior_scale=10) settings.ninit = 10 settings.nlive_const = 100 settings.n_dim = 10 # Run results settings dynamic_results_table = rt.get_dynamic_results(100, [0, 1], estimator_list, settings, save=False, load=False) dynamic_results_table[estimator_names] ``` Looking at the `std efficiency gain` rows, you should see that dynamic nested sampling targeted at parameter estimation (dynamic goal=1) has an efficiency gain (equivalent computational speedup) for parameter estimation (columns other than $\log \mathcal{Z}$) of factor of around 3 compared to standard nested sampling. 
Similar results tables for different likelihoods can be found in the dynamic nested sampling paper [(Higson et al., 2019a)](https://doi.org/10.1007/s11222-018-9844-0). For more information about the get_dynamic_results function look at its documentation. ### Comparing bootstrap error estimates to observed distributions of results We can check if the bootstrap estimates of parameter estimation sampling errors are accurate, using a 3d Gaussian likelihood and Gaussian prior. ``` settings.likelihood = likelihoods.Gaussian(likelihood_scale=1) settings.prior = priors.Gaussian(prior_scale=10) settings.n_dim = 3 bootstrap_results_table = rt.get_bootstrap_results(50, 50, # 100, 200, estimator_list, settings, n_run_ci=20, n_simulate_ci=200, # n_simulate_ci=1000, add_sim_method=False, cred_int=0.95, ninit_sep=True, parallel=True) bootstrap_results_table ``` Note that every second column gives an estimated numerical uncertainty on the values in the previous column. You should see that the ratio of the bootstrap error estimates to the standard deviation of results (row 4 of bootstrap_results_table) has values close to 1 given the estimated numerical uncertainties. Similar results are given in the appendix of the dynamic nested sampling paper [(Higson, 2019a)](https://doi.org/10.1007/s11222-018-9844-0); see the paper and the get_bootstrap_results function's documentation for more details.
github_jupyter
<a id='start'></a> # Introduction to Python #### In questo primo notebook introdurremo i concetti fondamentali per iniziare ad usare Python Il notebook è così suddiviso: <br> 1) [Hello, Python](#section1)<a href='#section1'></a> <br> 2) [Le funzioni](#section2)<a href='#section2'></a><br> 3) [Booleans & Condizioni](#section3)<a href='#section3'></a> <br> 4) [Le liste](#section4)<a href='#section4'></a> <br> 5) [Loops](#section5)<a href='#section5'></a><br> 6) [Le stringhe](#section6)<a href='#section6'></a><br> 7) [I dizionari](#section7)<a href='#section7'></a><br> 8) [Le librerie esterne](#section8)<a href='#section8'></a><br> 9) [Extra: Bonus, Pythonic Code!](#section9)<a href='#section9'></a> Inserite sempre questo piccolo pezzo di codice nei vostri notebooks, consente di caricare automaticamente il notebook e vi permette (soprattutto nelle lezioni future) di avere i grafici inline ``` # Put these at the top of every notebook, to get automatic reloading and inline plotting %reload_ext autoreload %autoreload 2 %matplotlib inline ``` <a id='section1'></a> ## 1) Hello, Python! Python è un linguaggio interpretato, ovvero a differenza di C++ (che è un linguaggio compilato), esegue il codice riga-per-riga, mentre C++ compila il codice e successivamente lo esegue. <br> Il vantaggio dei linguaggi di programmazione "interpretati" è che sono più facili "da leggere", ma più lenti in esecuzione rispetto ad un linguaggio compilato. 
<br> Proviamo a leggere il seguente codice e a ipotizzare quale sarà il suo output: ``` # Importo la libreria random # Servirà per generare numeri casuali import random as rd pere = 0 # Genero un numero casuale tra 0 e 10 Pere_comprate = rd.randint(0, 10) Pere_totali = pere + Pere_comprate if Pere_totali > 0: print("Ho", Pere_totali, "pere") else: print("Non ho nessuna pera.") print("Esempio finito.") ``` In questo piccolo script appena letto è possibile notare già alcuni aspetti della sintassi di Python e della sua semantica (ovvero come Python lavora).<br> Partiamo dalla prima linea di codice: ``` import random as rd ``` La funzione **import** serve ad importare una libreria in Python, e come vedremo ci sono molte librerie che possono essere molto utili per svolgere le nostre analisi. <br> Insieme ad *import* abbiamo usato **as** che ci ha permesso di denominare la libreria con una parola più breve (rd). Successivamente abbiamo identificato una variabile e le abbiamo assegnato un valore: ``` pere = 0 ``` Come possiamo notare non è stato necessario definire prima la tipologia di variabile, Python non ha bisogno di sapere in anticipo quale sarà la tipologia di variabile che stiamo definendo. ``` # Genero un numero casuale tra 0 e 10 Pere_comprate = rd.randint(0, 10) Pere_totali = pere + Pere_comprate ``` In Python i commenti vengono inseriti utilizzando il simbolo **#** <br> Nel codice sopra possiamo notare come è stata richiamata una funzione che fa parte della libreria "random", definita inizialmente con l'acronimo "rd"; la funzione usata in questo caso è **randint** che serve a generare un numero intero casuale nell'intervallo definito dagli input assegnati alla funzione. 
``` if Pere_totali > 0: print("Ho", Pere_totali, "pere") else: print("Non ho nessuna pera.") print("Esempio finito.") ``` I due punti " **:** " alla fine della linea dell'if indicano che inizia un "nuovo blocco di codice", perciò le linee di codice appartenenti a questo blocco devono essere indentate (ovvero iniziare dopo 4 spazi). <br> L'ultima linea di codice " *print("Esempio finito.")* " sarà fuori dall'if poichè non è indentato. **Print** è una funzione preimpostata di Python che mostra a schermo ciò che si inserisce in input nella funzione. <br> Le funzioni di Python vengono chiamate inserendo tra parentesi gli input dopo il nome della funzione. è possibile anche utilizzare il print di qualche variabili (con python 3.6 e superiore) con il seguente codice ``` name = 'Science' f'Data {name}' ``` e all'interno delle parentesi graffe è possibile utilizzare qualsiasi codice python che si vuole: ``` f'Data {name.upper()}' ``` Questa sintassi è molto utile per scrivere velocemente delle funzioni di print rapide e veloci per visualizzare il contenuto delle variabili, ma non è una sintassi particolarmente pulita per grandi quantità di codice complesso Per conoscere il tipo delle variabili che usiamo in Python possiamo usare la funzione **type**: ``` type(0) type(2.5) ``` Di seguito mostriamo le operazioni aritmetiche che possono essere fatte in Python: <img src="operators in Python.jpg"> Altre funzioni preimpostate in Python che possono essere utili sono: ``` print("Min:", min(1, 2, 3)) print("Max:", max(1, 2, 3)) print("Absolute Value:", abs(-32)) ``` <a id='section2'></a> ## 2) Le funzioni Una delle funzioni più utili è **help()**, infatti grazie a questa funzione è possibile capire qualsiasi altra funzione che si può usare in Python. ``` abs ``` La funzione help() mostra: <br> - L'intestazione della funzione, indicando quanti/quali argomenti prende in input la funzione; - Una breve descrizione di ciò che fa la funzione. 
<br> ``` help(print) ``` Ovviamente in Python è possibile definire funzioni personalizzate oltreché usare le funzioni già preimpostate, ad esempio: ``` def min_delta(a, b, c): delta_1 = abs(a-b) delta_2 = abs(b-c) delta_3 = abs(a-c) return min(delta_1, delta_2, delta_3) ``` Nel codice sopra abbiamo creato una funzione che prende in input tre argomenti: a, b, c. <br> Le funzioni iniziano sempre con la parola chiave **def**, il codice associato alla funzione è il blocco di codice indentato ed inserito dopo i "**:**". <br> **return** è un'altra parola chiave associata alla funzione e determina l'uscita immediata dalla funzione, passando in output il valore inserito a destra della parola chiave stessa. <br> <br> Cosa fa la funzione **min_delta**? ``` print(min_delta(1, 10, 100)) print(min_delta(1, 10, 10)) print(min_delta(2, 4, 8)) ``` Proviamo ad usare la funzione **help** per la nostra funzione personalizzata "min_delta": ``` help(min_delta) ``` Possiamo associare al codice che facciamo una descrizione in modo da poterla leggere quando usiamo la funzione pre-impostata di Python, help(). ``` def min_delta(a, b, c): """ La funzione determina la più piccola differenza tra due numeri, utilizzando a, b e c. 
>>> min_delta(1, 5, -5) 4 """ delta_1 = abs(a-b) delta_2 = abs(b-c) delta_3 = abs(a-c) return min(delta_1, delta_2, delta_3) help(min_delta) ``` Se torniamo ad osservare l'help della funzione di print possiamo osservare che ci sono dei parametri opzionali nella funzione, come il parametro *sep*: ``` help(print) print(1, 2, 3, sep= ' < ') print(1, 2, 3, sep='\n') print(1, 2, 3) ``` è possibile inserire dei parametri opzionali nelle funzioni che costruiamo, nel seguente modo: ``` def benvenuto(chi="Robot"): print("Hello,", chi) benvenuto() benvenuto(chi="Salvo") benvenuto("Andrea") print("Anche le funzioni sono degli oggetti, infatti sono:", type(benvenuto)) ``` Osserviamo il seguente utilizzo dei parametri opzionali: ``` def mod_5(x): """Restituisce il resto di x dopo averlo diviso per 5""" return x % 5 print( 'Qual è il numero più grande?', max(100, 51, 14), 'Quale numero ha il resto maggiore se diviso per 5?', max(100, 51, 14, key=mod_5), sep='\n', ) ``` Se vogliamo creare funzioni molto velocemente da usare in piccole parti di codice è possibile ricorrere alle lambda. Esistono in molti linguaggi di programmazione e sono facili e veloci per piccole parti di codice. Le lambda functions sono anche chiamate funzioni anonime perchè non hanno un nome esplicito (come le funzioni normali definite con "def"). Esse però possono essere associate a delle variabili. 
**Sintassi** La sintassi di una lambda function è la seguente `lambda arguments: expression` Per ulteriori approfondimenti consultare questo link: https://realpython.com/python-lambda/ Un piccolo esempio di come si usano le lambda: ``` mod_5 = lambda x: x % 5 # Con la parola chiave lambda non è necessario inserire la parola return print('101 mod 5 =', mod_5(101)) abs_diff = lambda a, b: abs(a-b) print("La differenza in termini assoluti tra 5 e 7 è", abs_diff(5, 7)) # Len: restituisce la lunghezza di una sequenza (di una lista o di una stringa) names = ['Salvatore', 'Andrea', 'Leonardo', 'Pietro'] print("Il nome più lungo è:", max(names, key=lambda name: len(name))) ``` <a id='section3'></a> ## 3) Booleans & Condizioni I principali operatori che danno come risposta True/False sono i seguenti: <img src="comparison_operations.jpg"> ``` def votare_senato(eta): """ La persona può votare i membri del Senato in Italia?""" # La Costituzione italiana indica che possono votare i membri del Senato # chi ha compiuto almeno 25 anni d'età. return eta >= 25 print("Chi ha 19 anni può votare i membri del Senato:", votare_senato(19)) print("Chi ha 27 anni può votare i membri del Senato:", votare_senato(27)) ``` È necessario fare attenzione alle tipologie di dati che si mettono a confronto, infatti: ``` 3.0 == 3 '3' == 3 ``` Come altri linguaggi di programmazione, Python permette di combinare i valori booleani utilizzando i concetti di "*and*", "*or*" e "*not*". <br> Qual è il valore della prossima espressione? ``` True or True and False; ``` Python segue delle regole ben precise quando deve valutare espressioni come quelle scritte sopra. L'operatore **and** ha la precedenza sull'operatore **or**. Perciò seguendo le logiche di Python possiamo dividere l'espressione sopra nel seguente modo: <br> - 1. True and False --> False - 2. 
True or False --> True ``` print(True and False) print(True or print(True and False)) True or True and False ``` Per maggiori dettagli sulle precedenze degli operatori utilizzati in Python è possibile cliccare [qui](https://docs.python.org/3/reference/expressions.html#operator-precedence). <br> Una prassi che può aiutare il lettore a capire quale espressione eseguire prima può essere inserire le parentesi all'interno dell'espressione: <br> *True or (True and False)* <br> <br> Osserviamo ora la seguente espressione cercando di capirne il senso: pronto_per_la_pioggia = Ombrello **or** livello_pioggia < 5 **and** Cappuccio **or not** livello_pioggia > 0 **and** giorno_lavorativo Nell'espressione scritta sopra stiamo provando ad affermare che: <br> Sono salvo dal tempo se: <br> - Ho un ombrello... - oppure, se la pioggia non è forte e ho il cappuccio.. - oppure, piove ed è un giorno lavorativo. <br> <br> L'espressione scritta sopra, oltre ad essere difficile da leggere ha anche un bug. pronto_per_la_pioggia = (<br> Ombrello <br> **or** ((livello_pioggia < 5) **and** Cappuccio) <br> **or** (**not** (livello_pioggia > 0 **and** giorno_lavorativo))<br> ) I booleans tornano molto utili quando vengono usati con la sintassi condizionale, ovvero quando si usano le seguenti parole chiave **if**, **elif** e **else**. ``` def what(x): if x == 0: print(x, "è zero") elif x > 0: print(x, "è positivo") elif x < 0: print(x, "è negativo") else: print(x, "è qualcosa che non ho mai visto..") what(0) what(-15) ``` In Python è presente la funzione **bool()** che trasforma un elemento in una variabile booleana. 
<br> Ad esempio: ``` print(bool(1)) # Tutti i numeri sono considerati veri, a parte 0 print(bool(0)) print(bool("ahieahie")) # Tutte le stringhe sono considerate vere, a parte # le stringhe vuote "" print(bool("")) ``` Osserviamo il seguente script: ``` def risultato_quiz(voto): if voto < 50: risultato = 'Non hai passato' else: risultato = 'Hai passato' print(risultato, "l'esame, il tuo punteggio è :", voto) risultato_quiz(80) ``` In questo caso è possibile replicare la funzione scritta sopra, nel seguente modo: ``` def risultato_quiz(voto): risultato = 'Non hai passato' if voto < 50 else 'Hai passato' print(risultato, "l'esame, il tuo punteggio è :", voto) risultato_quiz(45) ``` <a id='section4'></a> ## 4) Le liste Le liste in Python sono una sequenza ordinati di valori e sono definite attraverso valori separati da una virgola e contenuti in parentesi quadre. ``` Numeri_primi = [1, 2, 3, 5, 7] type(Numeri_primi) Pianeti = ['Mercurio', 'Venere', 'Terra', 'Marte',\ 'Giove', 'Saturno', 'Urano', 'Nettuno'] Pianeti ``` Una lista può contenere altre liste, ad esempio: ``` Carte = [['J', 'Q', 'K'], ['2', '4', '8'], ['6', 'A', 'K']] # Per una lettura migliore è possibile anche scrivere nel seguente modo: Carte = [ ['J', 'Q', 'K'], ['2', '4', '8'], ['6', 'A', 'K'] ] ``` Una lista può contenere un mix di elementi di tipo diverso: ``` Elementi_preferiti = [27, 'Moto'] ``` È possibile accedere agli elementi di una lista di Python attraverso l'indicizzazione tramite parentesi quadre. <br> Ad esempio, qual è il pianeta più vicino al sole? ``` Pianeti[0] ``` Qual è il pianeta più lontano dal sole? <br> *Gli elementi alla fine di una lista possono essere identificati attraverso i numeri negativi, partendo da -1.* ``` Pianeti[-1] Pianeti[-2] ``` Quali sono i primi tre pianeti più vicini al sole? <br> *Rispondiamo a questa domanda utilizzando lo **slicing*** ``` Pianeti[0:3] ``` La notazione vista sopra "[0:3]" ci dice di partire da 0 e continuare fino all'indice **3, escluso**. 
Non è necessario indicare l'inizio e la fine dell'indicizzazione qualora si volesse partire/finire con il primo/ultimo elemento di una lista. ``` Pianeti[:3] Pianeti[3:] # Dal terzo pianeta in poi Pianeti[-3:] # Gli ultimi 3 pianeti Pianeti[3] = "Pianeta X" Pianeti Pianeti[:3] = ['A', 'B', 'C'] Pianeti ``` Python ha differenti funzioni che possono essere usate con le liste: <br> - **len**: permette di calcolare la lunghezza di una lista; <br> - **sorted**: dà come risultato la lista ordinata; <br> - **sum**: somma gli elementi di una lista. ``` len(Pianeti) Pianeti = ['Mercurio', 'Venere', 'Terra', 'Marte',\ 'Giove', 'Saturno', 'Urano', 'Nettuno'] sorted(Pianeti) sum(Numeri_primi) ``` Gli oggetti in Python portano con se degli elementi: <br> - I **metodi**: funzioni che possono essere eseguite partendo da un oggetto;<br> - Gli **attributi**: elementi che sono collegati ad un oggetto ma non sono funzioni. Un esempio di **metodo** può essere **bit_length**; ovvero un metodo che è associato ai numeri e indica i bit usati da un numero: ``` x = 12 x.bit_length() ``` Possiamo usare l'help di Python anche per capire cosa fa un metodo di un oggetto di Python. ``` help(x.bit_length) ``` I **metodi ** più utilizzati quando si usano le liste di Python, sono i seguenti: <br> - **.append** : permette di modificare una lista aggiungendo un elemento in fondo alla lista; <br> - **.pop** : rimuove e stampa l'ultimo elemento di una lista; <br> - **.index** : Indica l'indice in cui si trova un determinato elemento all'interno della lista. <br> Di seguito ci sono un pò di esempi. <br> <br> Per osservare tutti i metodi associati ad un oggetto è possibile fare: **help(*nome_oggetto*)** ``` Pianeti.append('Plutone') Pianeti help(Pianeti.append) Pianeti.pop() Pianeti Pianeti.index('Terra') Pianeti.index('Plutone') # La Terra è nella lista dei pianeti? 
"Terra" in Pianeti "Plutone" in Pianeti ``` Le **Tuple** sono esattamente la stessa cosa delle liste, tuttavia differiscono da quest'ultime per i seguenti punti: <br> - È possibile usare le parentesi tonde per creare le tuple e non per forza le parentesi quadre, come nel caso delle liste; <br> - Le tuple **non** sono modificabili, una volta definite. ``` t = (1, 2, 3) t t[0] = 100 # Assegnazione reciproca di due variabili in maniera "Smart" a = 1 b = 0 a, b = b, a print(a , b) ``` <a id='section5'></a> ## 5) Loops ``` Pianeti # Stampo tutti i pianeti sulla stessa linea for i in Pianeti: print(i, end=' ') "Mercurio" in Pianeti ``` In un loop **for** specifichiamo: <br> - La variabile che vogliamo usare; <br> - La lista su cui vogliamo eseguire il loop <br> <br>E con "**in**" colleghiamo la variabile che cambia in ogni ciclo del loop con la lista da cui prenderà il valore la variabile del loop. A destra di "in" ci deve essere un oggetto che supporta le iterazioni. ``` moltiplicandi = (2, 2, 2, 3, 3, 5) prodotto = 1 for i_molt in moltiplicandi: prodotto = prodotto * i_molt prodotto ``` È possibile iterare anche gli elementi che sono contenuti in una stringa: ``` s = "prova a CapIre lA struttura sOtto" msg = '' # Stampiamo tutte le lettere maiuscole, una alla volta for lettera in s: if lettera.isupper(): print(lettera, end='') ``` **range()** è una funzione che crea una sequenza di numeri; questa funzione può tornare utile durante la scrittura di loops. ``` for i in range(5): print("File elaborati:", i) ``` È possibile assumere che **range(5)** generi una lista di numeri **[0, 1, 2, 3, 4]**; tuttavia in realtà la funzione **range** genere un oggetto *range*, che è diverso dall'oggetto *lista*. ``` r = range(5) r # Possiamo convertire l'oggetto range # in una lista utilizzando il convertitore list() list(r) ``` Finora abbiamo usato la notazione **for** e **in** per iterare una variabile assegnandole i valori che sono inseriti in una lista (o in una tupla). 
<br> Supponiamo ora di voler *fare il loop sugli elementi di una lista e contemporaneamente fare il loop sull'indice di una lista.* <br> È possibile fare ciò utilizzando la funzione **enumerate**. ``` nums = [0, 1, 2] def raddoppia(nums): for i, num in enumerate(nums): if num % 2 == 1: nums[i] = num * 2 x = list(range(10)) raddoppia(x) x list(enumerate(['a', 'b'])) nums = [ ('uno', 1, 'I'), ('due', 2, 'II'), ('tre', 3, 'III'), ('quattro', 4, 'IV'), ] for parola, intero, numero_romano in nums: print(parola, intero, numero_romano, sep=' = ', end='; ') ``` Quest'ultimo codice appena eseguito è sicuramente più veloce e chiaro del seguente: ``` for tup in nums: parola = tup[0] intero = tup[1] numero_romano = tup[2] print(parola, intero, numero_romano, sep=' = ', end=';') ``` Un altro loop, molto famoso è il **while loops** ``` i = 0 while i < 10: print(i, end=' ') i += 1 ``` Di seguito altre tecniche che possono essere usate con le liste, soprattutto per rispiarmare righe di codice. ``` quadrati = [n**2 for n in range(10)] quadrati ``` Senza usare la tecnica vista prima si poteva ottenere lo stesso risultato nel seguente modo: ``` quadrati = [] for n in range(10): quadrati.append(n**2) quadrati Pianeti_abbr = [Pianeta for Pianeta in Pianeti if len(Pianeta) < 6] Pianeti_abbr [ Pianeta.upper() + '!' for Pianeta in Pianeti if len(Pianeta) < 6 ] ``` Di seguito tre modi diversi, per fare un codice in cui si contano i numeri negativi contenuti in una lista. ``` def conta_negativi(nums): """Indica quanti numeri negativi ci sono in una lista. >>> conta_negativi([5, -1, -2, 0, 3]) 2 """ n_negativi = 0 for num in nums: if num < 0: n_negativi = n_negativi + 1 return n_negativi def conta_negativi(nums): return len([num for num in nums if num < 0]) def conta_negativi(nums): return sum([num < 0 for num in nums]) ``` <a id='section6'></a> ## 6) Le Stringhe In questo paragrafo vedermo i principali metodi e operazioni di formattazione che è possibile usare sulle stringhe. 
<br> Le stringhe in Python possono essere definite utilizzando sia i doppi apici che i singoli apici. ``` x = 'Plutone è un pianeta' y = "Plutone è un pianeta" x == y ``` Per evitare errori di formattazione è possibile usare i doppi apici o i singoli apici all'interno delle stringhe a seconda che si siano usati singoli apici o doppi apici come delimitatori, ad esempio: ``` # In questo caso otterrremmo errore 'Anch'io sono un pianeta!' ``` È possibile risolvere questo errore utilizzando il simbolo \ prima dell'apice interno alla frase. ``` 'Anch\'io sono un pianeta!' ``` Oppure ``` "Anch'io sono un pianeta" ``` La seguente tabella riepiloga i principali utilizzi del simbolo \ all'interno di una stringa: <img src='blackslash_caracter.jpg'> Le stringhe possono essere viste come una sequenza di caratteri, perciò tutte le cose che si sono viste per le liste possono essere applicate alle stringhe. ``` # Indicizzazione pianeta = 'Plutone' pianeta[2] # Slicing pianeta[-3:] # Quanto è lunga la stringa? len(pianeta) # È possibile fare un loop utilizzando la lunghezza di una stringa [char+'!' for char in pianeta] ``` Tuttavia, a differenza delle liste, **le stringhe sono immutabili**. ``` pianeta[0]='B' ``` Anche le stringhe, come le liste hanno dei metodi associati al loro oggetto. ``` frase = "Plutone è un pianeta" frase.upper() frase.lower() frase.index('un') frase.split() data_stringa = '1991-07-12' anno, mese, giorno = data_stringa.split('-') print(anno) print(mese) print(giorno) '/'.join([giorno, mese, anno]) ``` È possibile unire più stringhe con Python utilizzando l'operatore **+** ``` pianeta + ", sei troppo lontano" ``` Tuttavia è necessario utilizzare la funzione **str()** qualora si volesse unire un oggetto non-stringa ad una stringa ``` position = 9 "Sei arrivato " + str(position) + " su 10 partecipanti." 
``` Oppure la funzione **str.format()**: ``` "Sei arrivato {} su {} partecipanti.".format(position, position + 1) prezzo_init = 5.25 prezzo_fin = 6 performance = (prezzo_fin - prezzo_init)/prezzo_init # Nella frase stampero le cifre decimali e # la performance in termini percentuali "Ho comprato le azioni al prezzo di {:.2} e le ho vendute a {}, registrando \ una performance di {:.2%}".format(prezzo_init, prezzo_fin, performance) # È possibile identificare dei riferimenti # alle parole all'interno delle stringhe s = "Plutone è un {0}, non una {1}. \ Preferisco una {1} ad un {0}".format('pianeta', 'mela') print(s) ``` <a id='section7'></a> ## 7) I dizionari I dizionari sono delle strutture pre impostate di Python che permettono di mappare dei valori su delle chiavi. Ad esempio: ``` numeri = {'uno': 1, 'due': 2, 'tre': 3} ``` In questo caso 'uno', 'due' e 'tre' sono le **chiavi**, mentre 1, 2 e 3 sono i loro corrispondenti **valori**. <br> È possibile accedere ai valori attraverso l'utilizzo delle parentesi quadre come si fa con le liste e con le stringhe. ``` numeri['uno'] ``` È possibile aggiungere dei nuovi valori al dizionario, identificando semplicemente una nuova chiave, ad esempio: ``` numeri['quattro'] = 4 numeri ``` È possibile anche cambiare un valore associato ad una chiave già esistente: ``` numeri['uno'] = 0 numeri ``` La sintassi usata per i dizionari è molto simile a quella vista per le liste. ``` pianeta_iniziale = {pianeta: pianeta[0] for pianeta in Pianeti} pianeta_iniziale ``` L'operatore **in** può essere usato per capire se un elemento si trova dentro un dizionario. ``` 'Saturno' in Pianeti 'Pianeta X' in Pianeti ``` Un loop for su un dizionario effettua il loop sulle chiavi del dizionario, ad esempio: ``` for k in numeri: print("{} = {}".format(k, numeri[k])) ``` È possibile accedere direttamente a tutte le chiavi o a tutti i valori di un dizionario attraverso i seguenti metodi dell'oggetto dizionario **dict.keys()** e **dict.values()**. 
``` numeri.keys() numeri.values() ``` Uno dei metodi più utili quando si usano i dizionari è **dict.items()**, questo metodo ci permette di iteerare le chiavi e i valori di un dizionario simultaneamente. ``` for pianeta, iniziale_pianeta in pianeta_iniziale.items(): print("{} inizia con '{}'".format(pianeta.rjust(10), iniziale_pianeta)) ``` <a id='section8'></a> ## 8) Le librerie esterne Una delle qualità principali di Python è l'elevato numero di librerie personalizzate che sono state scritte per questo linguaggio di programmazione. Alcune di queste librerie sono *standard*, ovvero possono essere trovate in qualsiasi Python; tuttavia le librerie che non sono comprese di default in Python possono essere facilmente richiamate attraverso la parola chiave **import**. <br> Importiamo la libreria *math* così come abbiamo fatto nel primo script di questo notebook. ``` import math print("Math è di questo tipo: {}".format(type(math))) ``` Per visualizzare le informazioni di python relative ad una libreria è sufficiente lanciare la libreria stessa ``` display ``` Mentre per visualizzare la documentazione e le informazioni è sufficiente inserire un punto di domanda prima della funzione ``` ?display ``` Per visualizzare invece il source code della funzione che volete utilizzare è sufficiente utilizare due punti di domanda ``` ??display ``` Math è un modulo, ovvero una collezione di variabili e funzioni definite da qualcun altro. È possibile osservare tutte le variabili e funzioni contenute in Math utilizzando la funzione **dir()**. ``` print(dir(math)) print("I primi quattro numeri del pi-quadro sono = {:.4}".format(math.pi)) math.log(32,2) help(math.log) ``` Come abbiamo accennato all'inizio di questo notebook, quando si importa una libreria è possibile assegnarle un nome abbreviato per poterlo riusare nel codice. 
``` import math as mt mt.pi ``` È possibile importare anche solo una particolare variabile contenuta all'interno della libreria senza dover importarsi tutta la libreria, in questo caso potremmo usare la seguente notazione: ``` from math import pi print(pi) from math import * from numpy import * print(pi, log(32,2)) ``` In questo caso abbiamo riscontrato un errore poichè la variabile **log** è contenuta sia nella libreria *math* che nella *numpy*, ma ha differenti input. Poichè abbiamo importato anche la libreria *numpy* in questo caso il log di quest'ultima libreria ha sovrascritto il log della libreria math.<br> Un modo per risolvere il problema di prima è importare solamente ciò che vogliamo davvero usare, ad esempio: ``` from math import log, pi from numpy import asarray print(pi, log(32,2)) ``` In generale, se incontriamo degli oggetti di Python che non conosciamo possiamo utilizzare tre funzioni pre-impostate di Python:<br> - 1)**type()** : ci dice cos'è l'oggetto; - 2)**dir()**: ci dice cosa può fare l'oggetto; - 3)**help()**: ci dice più in dettaglio i metodi associati all'oggetto e le loro funzionalità # Bonus: Pythonic Code! <a id='section9'></a> Scrivere in python è molto semplice e molto veloce rispetto ad altri linguaggi di programmazione. L'indentazione automatica inoltre ti porta a scrivere del codice pulito. E' importante però prestare comunque attenzione nello scrivere codice ben fatto, in gergo si dice: pythonic! Ecco perchè esiste lo Zen di Python ``` import this ``` Ed inoltre PEP-8! PEP 8, a volte digitato PEP8 o PEP-8, è un documento che fornisce linee guida e procedure consigliate su come scrivere codice Python. È stato scritto nel 2001 da Guido van Rossum, Barry Warsaw e Nick Coghlan. L'obiettivo principale di PEP 8 è migliorare la leggibilità e la coerenza del codice Python. PEP sta per Python Enhancement Proposal, e ce ne sono molti. 
Un PEP è un documento che descrive le nuove funzionalità proposte per Python e documenta aspetti di Python, come il design e lo stile, per la comunità. https://realpython.com/python-pep8/ Esistono anche dei linter che consentono di formattare e controllare lo stile del codice python. A tal proposito è importante citare: pylint e pycodestyle (che possono essere installati come librerie esterne) https://github.com/PyCQA/pycodestyle https://www.pylint.org/ ### Qualche piccola considerazione Anche se in questo corso non verranno trattate tematiche di software engineering e di sviluppo è comunque importante scrivere un buon codice pulito per alcune semplici ragioni: - Si lavora in team, quindi il codice che noi scriviamo verrà sicuramente usato / visto / controllato da altre persone, agevolare la lettura del codice alle altre persone è importante (Etica della reciprocità, detta anche "Regola d'oro") https://it.wikipedia.org/wiki/Etica_della_reciprocit%C3%A0 - Molto spesso si torna su codice scritto da tempo, avere del buon codice riduce di tanto i tempi di "rinfrescamento della memoria e ripasso" - Scrivere codice pulito permette di trovare velocemente gli errori e bugs, soprattutto con grandi quantità di codice - Perchè è importante fare le cose fatte bene, belle. - Perchè sì. Un'altra massima è la seguente: Documentate il codice. È importante. Inserite qualche log nel codice. È importante. Non lasciate codice commentato. È importante. Siate precisi e seguite le best-practices. È importante. Spesso nel mondo della Data Science per realizzare prototipi e analisi velocmente si trascurano questi concetti, è importante invece cercare di applicarli quanto più possibile...per noi e per gli altri! [Clicca qui per tornare all'inizio della pagina](#start)<a id='start'></a> Con questo paragrafo si conclude il notebook "Introduction to Python", clicca qui per passare al prossimo notebook "Collecting". Per eventuali dubbi ci potete scrivere su Teams!<br> A presto!
github_jupyter
``` """ Demo showing how km_dict and insegtprobannotator may be used together for interactive segmentation. @author: vand and abda """ import sys import skimage.io import skimage.data import skimage.transform import numpy as np %gui qt patch_dir = '/Users/vand/Documents/PROJECTS2/InSegt/pycode' import sys if patch_dir not in sys.path: sys.path.append(patch_dir) import insegtprobannotator import km_dict import feat_seg import PyQt5.QtCore import glob import matplotlib.pyplot as plt import matplotlib.colors %matplotlib notebook import time #%% EXAMPLE: nerve fibres in_dir = '/Users/vand/Documents/PROJECTS2/InSegt/data/nerve/' file_names = sorted(glob.glob(in_dir + '*.png')) n_im = len(file_names) n = 25 sc_fac = 0.75 # loading image print('Loading image') image = skimage.transform.rescale(skimage.io.imread(file_names[n]), sc_fac, preserve_range = True) fig, ax = plt.subplots(1) ax.imshow(image) plt.show() int_patch_size = 9 branching_factor = 5 number_layers = 5 number_training_patches = 200000 normalization = False patch_size_feat = 5 n_train = 50000 n_keep = 25 order_keep = (True, True, True) image_float = image.astype(np.float)/255 # Compute feature image feat_im, vec, mean_patch, norm_fac = feat_seg.get_uni_pca_feat(image_float, patch_size_feat, n_train, n_keep, order_keep, sigma = 2) feat_im = np.asarray(feat_im.transpose(2,0,1), order='C') # Build tree T = km_dict.build_km_tree(feat_im, 1, branching_factor, number_training_patches, number_layers, normalization) # Search km-tree and get assignment A, number_nodes = km_dict.search_km_tree(feat_im, T, branching_factor, normalization) # number of repetitions for updating the segmentation number_repetitions = 2 def processing_function(labels): r,c = labels.shape l = np.max(labels)+1 if(l>1): label_image = np.zeros((r,c,l)) for k in range(number_repetitions): for i in range(1,l): label_image[:,:,i] = (labels == i).astype(float) D = km_dict.improb_to_dictprob(A, label_image[:,:,1:], number_nodes, int_patch_size) # 
Dictionary P = km_dict.dictprob_to_improb(A, D, int_patch_size) # Probability map labels = (np.argmax(P,axis=2) + 1)*(np.sum(P,axis=2)>0)# Segmentation else: P = np.empty((r,c,0)) D = None return labels, P.transpose(2,0,1), D pf = lambda labels: processing_function(labels)[:2] print('Showtime') # showtime app = PyQt5.QtCore.QCoreApplication.instance() if app is None: app = insegtprobannotator.PyQt5.QtWidgets.QApplication(sys.argv) ex = insegtprobannotator.InSegtProbAnnotator(image.astype(np.uint8), pf) app.exec_() # Get the labels labels = ex.rgbaToLabels(ex.pixmapToArray(ex.annotationPix)) # Compute dictionary segmentation, P_out, D = processing_function(labels) r,c = image.shape V = np.empty((r,c,n_im)) t = time.time() for i in range(0,n_im): im_in = skimage.transform.rescale(skimage.io.imread(file_names[i]), sc_fac, preserve_range = True).astype(np.float)/255 feat_im_b = feat_seg.get_uni_pca_feat(im_in, vec = vec, mean_patch = mean_patch, sigma = 2, norm_fac = norm_fac)[0].transpose(2,0,1)# Compute feature image A_b = km_dict.search_km_tree(feat_im_b, T, branching_factor, normalization)[0] V[:,:,i] = km_dict.dictprob_to_improb(A_b, D, int_patch_size)[:,:,0] if (i % 10 == 0): print(f'Iteration: {i:03.0f} of {n_im:03.0f} Time {time.time()-t:0.3} sec') t = time.time() fig, ax = plt.subplots(1,2) ax[0].imshow(V[:,:,10]<0.5) ax[1].imshow(V[:,100,:]<0.5) plt.show() import vizVTK vizVTK.visgray(((V>0.5)*255).astype(np.uint8), scalarOpacity = {0: 1.0, 130: 0.001}, gradientOpacity=None, colorTransfer = {0: (0.0, 1.0, 1.0), 255: (1.0, 0.0, 0.0)}, windowSize = (1600,1600)) ```
github_jupyter
# McStas ## First time setup McStas Script ``` from mcstasscript.interface import functions # Each time a new conda env is created and used McStas must be configured my_configurator = functions.Configurator() my_configurator.set_mcrun_path("/usr/local/bin/") my_configurator.set_mcstas_path("/usr/local/mcstas/2.5/") ``` ## McStas instr file ``` from mcstasscript.interface import instr, plotter, functions ISIS_SANS2d_Mantid = instr.McStas_instr("ISIS_SANS2d_Mantid_generated") ISIS_SANS2d_Mantid.add_parameter("double", "L1", value=3.926) ISIS_SANS2d_Mantid.add_parameter("double", "A1w", value=0.03) ISIS_SANS2d_Mantid.add_parameter("double", "A1h", value=0.02) ISIS_SANS2d_Mantid.add_parameter("double", "S6", value=0.006) ISIS_SANS2d_Mantid.add_parameter("double", "A2", value=0.006) ISIS_SANS2d_Mantid.add_parameter("double", "Lmin", value=1.0) ISIS_SANS2d_Mantid.add_parameter("double", "Lmax", value=14.0) ISIS_SANS2d_Mantid.add_parameter("double", "model_nr", value=5.0) a1 = ISIS_SANS2d_Mantid.add_component("a1", "Progress_bar") a1.set_AT(['0', '0', '0'], RELATIVE="ABSOLUTE") Origin = ISIS_SANS2d_Mantid.add_component("Origin", "Arm") Origin.set_AT(['0', '0', '0'], RELATIVE="ABSOLUTE") isis_source = ISIS_SANS2d_Mantid.add_component("isis_source", "ISIS_moderator") isis_source.Face = "\"E2\"" isis_source.Emin = "-Lmax" isis_source.Emax = "-Lmin" isis_source.dist = 3.68 isis_source.focus_xw = 0.0365 isis_source.focus_yh = 0.021 isis_source.xwidth = -1 isis_source.yheight = -1 isis_source.CAngle = 0.0 isis_source.SAC = 1 isis_source.set_AT(['0.0', ' 0.0', ' 0.00001'], RELATIVE="Origin") isis_source.set_ROTATED(['0.0', ' 0.0', ' 0.0'], RELATIVE="Origin") lmon1 = ISIS_SANS2d_Mantid.add_component("lmon1", "L_monitor") lmon1.nL = 140 lmon1.filename = "\"lmon1.dat\"" lmon1.xmin = -0.04 lmon1.xmax = 0.04 lmon1.ymin = -0.03 lmon1.ymax = 0.03 lmon1.Lmin = 0.0 lmon1.Lmax = 17.0 lmon1.set_AT(['0.0', ' 0.0', ' 3.698'], RELATIVE="isis_source") psd1 = 
ISIS_SANS2d_Mantid.add_component("psd1", "PSD_monitor") psd1.nx = 100 psd1.ny = 100 psd1.filename = "\"psd1.dat\"" psd1.xmin = -0.05 psd1.xmax = 0.05 psd1.ymin = -0.05 psd1.ymax = 0.05 psd1.set_AT(['0.0', ' 0.0', ' 3.699'], RELATIVE="isis_source") bender1 = ISIS_SANS2d_Mantid.add_component("bender1", "Guide_gravity") bender1.w1 = .0355 bender1.h1 = .020 bender1.w2 = .0355 bender1.h2 = .020 bender1.l = 0.3245 bender1.nslit = 9 bender1.d = .0005 bender1.mleft = 1 bender1.mright = 3 bender1.mtop = 1 bender1.mbottom = 1 bender1.wavy = 0 bender1.set_AT(['0', ' 0', ' 3.7'], RELATIVE="isis_source") bender1.set_ROTATED(['0.0', ' 0.137099', ' 0.0'], RELATIVE="isis_source") bender2 = ISIS_SANS2d_Mantid.add_component("bender2", "Guide_gravity") bender2.w1 = .0355 bender2.h1 = .020 bender2.w2 = .0355 bender2.h2 = .020 bender2.l = 0.3245 bender2.nslit = 9 bender2.d = .0005 bender2.mleft = 1 bender2.mright = 3 bender2.mtop = 1 bender2.mbottom = 1 bender2.wavy = 0 bender2.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender1") bender2.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender1") bender3 = ISIS_SANS2d_Mantid.add_component("bender3", "Guide_gravity") bender3.w1 = .0355 bender3.h1 = .020 bender3.w2 = .0355 bender3.h2 = .020 bender3.l = 0.3245 bender3.nslit = 9 bender3.d = .0005 bender3.mleft = 1 bender3.mright = 3 bender3.mtop = 1 bender3.mbottom = 1 bender3.wavy = 0 bender3.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender2") bender3.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender2") bender4 = ISIS_SANS2d_Mantid.add_component("bender4", "Guide_gravity") bender4.w1 = .0355 bender4.h1 = .020 bender4.w2 = .0355 bender4.h2 = .020 bender4.l = 0.3245 bender4.nslit = 9 bender4.d = .0005 bender4.mleft = 1 bender4.mright = 3 bender4.mtop = 1 bender4.mbottom = 1 bender4.wavy = 0 bender4.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender3") bender4.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender3") bender5 = ISIS_SANS2d_Mantid.add_component("bender5", 
"Guide_gravity") bender5.w1 = .0355 bender5.h1 = .020 bender5.w2 = .0355 bender5.h2 = .020 bender5.l = 0.3245 bender5.nslit = 9 bender5.d = .0005 bender5.mleft = 1 bender5.mright = 3 bender5.mtop = 1 bender5.mbottom = 1 bender5.wavy = 0 bender5.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender4") bender5.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender4") bender6 = ISIS_SANS2d_Mantid.add_component("bender6", "Guide_gravity") bender6.w1 = .0355 bender6.h1 = .020 bender6.w2 = .0355 bender6.h2 = .020 bender6.l = 0.3245 bender6.nslit = 9 bender6.d = .0005 bender6.mleft = 1 bender6.mright = 3 bender6.mtop = 1 bender6.mbottom = 1 bender6.wavy = 0 bender6.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender5") bender6.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender5") bender7 = ISIS_SANS2d_Mantid.add_component("bender7", "Guide_gravity") bender7.w1 = .0355 bender7.h1 = .020 bender7.w2 = .0355 bender7.h2 = .020 bender7.l = 0.3245 bender7.nslit = 9 bender7.d = .0005 bender7.mleft = 1 bender7.mright = 3 bender7.mtop = 1 bender7.mbottom = 1 bender7.wavy = 0 bender7.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender6") bender7.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender6") bender8 = ISIS_SANS2d_Mantid.add_component("bender8", "Guide_gravity") bender8.w1 = .0355 bender8.h1 = .020 bender8.w2 = .0355 bender8.h2 = .020 bender8.l = 0.3245 bender8.nslit = 9 bender8.d = .0005 bender8.mleft = 1 bender8.mright = 3 bender8.mtop = 1 bender8.mbottom = 1 bender8.wavy = 0 bender8.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender7") bender8.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender7") bender9 = ISIS_SANS2d_Mantid.add_component("bender9", "Guide_gravity") bender9.w1 = .0355 bender9.h1 = .020 bender9.w2 = .0355 bender9.h2 = .020 bender9.l = 0.3245 bender9.nslit = 9 bender9.d = .0005 bender9.mleft = 1 bender9.mright = 3 bender9.mtop = 1 bender9.mbottom = 1 bender9.wavy = 0 bender9.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender8") 
bender9.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender8") bender10 = ISIS_SANS2d_Mantid.add_component("bender10", "Guide_gravity") bender10.w1 = .0355 bender10.h1 = .020 bender10.w2 = .0355 bender10.h2 = .020 bender10.l = 0.3245 bender10.nslit = 9 bender10.d = .0005 bender10.mleft = 1 bender10.mright = 3 bender10.mtop = 1 bender10.mbottom = 1 bender10.wavy = 0 bender10.set_AT(['0', ' 0', ' 0.325'], RELATIVE="bender9") bender10.set_ROTATED(['0.0', ' 0.1375099', ' 0.0'], RELATIVE="bender9") lmonb = ISIS_SANS2d_Mantid.add_component("lmonb", "L_monitor") lmonb.nL = 140 lmonb.filename = "\"lmonB.dat\"" lmonb.xmin = -0.018 lmonb.xmax = 0.018 lmonb.ymin = -0.018 lmonb.ymax = 0.018 lmonb.Lmin = 0.0 lmonb.Lmax = 17.0 lmonb.set_AT(['0.0', ' 0.0', ' 0.326'], RELATIVE="bender10") psd2 = ISIS_SANS2d_Mantid.add_component("psd2", "PSD_monitor") psd2.nx = 100 psd2.ny = 100 psd2.filename = "\"psd2.dat\"" psd2.xmin = -0.025 psd2.xmax = 0.025 psd2.ymin = -0.025 psd2.ymax = 0.025 psd2.set_AT(['0.0', ' 0.0', ' 0.001'], RELATIVE="lmonb") guide_in = ISIS_SANS2d_Mantid.add_component("guide_in", "Slit") guide_in.xmin = -0.015 guide_in.xmax = 0.015 guide_in.ymin = -.01 guide_in.ymax = +.01 guide_in.set_AT(['0', ' 0', ' 0.2845'], RELATIVE="psd2") guide_straight1 = ISIS_SANS2d_Mantid.add_component("guide_straight1", "Guide_gravity") guide_straight1.w1 = .030 guide_straight1.h1 = .020 guide_straight1.w2 = .030 guide_straight1.h2 = .020 guide_straight1.l = 1.985 guide_straight1.mleft = 1 guide_straight1.mright = 1 guide_straight1.mtop = 1 guide_straight1.mbottom = 1 guide_straight1.wavy = 0 guide_straight1.set_AT(['0', ' 0', ' 0.0075'], RELATIVE="guide_in") guide_straight2 = ISIS_SANS2d_Mantid.add_component("guide_straight2", "Guide_gravity") guide_straight2.w1 = .030 guide_straight2.h1 = .020 guide_straight2.w2 = .030 guide_straight2.h2 = .020 guide_straight2.l = 1.985 guide_straight2.mleft = 1 guide_straight2.mright = 1 guide_straight2.mtop = 1 guide_straight2.mbottom = 1 
guide_straight2.wavy = 0 guide_straight2.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight1") guide_straight3 = ISIS_SANS2d_Mantid.add_component("guide_straight3", "Guide_gravity") guide_straight3.w1 = .030 guide_straight3.h1 = .020 guide_straight3.w2 = .030 guide_straight3.h2 = .020 guide_straight3.l = 1.985 guide_straight3.mleft = 1 guide_straight3.mright = 1 guide_straight3.mtop = 1 guide_straight3.mbottom = 1 guide_straight3.wavy = 0 guide_straight3.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight2") guide_straight4 = ISIS_SANS2d_Mantid.add_component("guide_straight4", "Guide_gravity") guide_straight4.w1 = .030 guide_straight4.h1 = .020 guide_straight4.w2 = .030 guide_straight4.h2 = .020 guide_straight4.l = 1.985 guide_straight4.mleft = 1 guide_straight4.mright = 1 guide_straight4.mtop = 1 guide_straight4.mbottom = 1 guide_straight4.wavy = 0 guide_straight4.set_AT(['0', ' 0', ' 2.000'], RELATIVE="guide_straight3") psd3 = ISIS_SANS2d_Mantid.add_component("psd3", "PSD_monitor") psd3.nx = 100 psd3.ny = 100 psd3.filename = "\"psd3.dat\"" psd3.xmin = -0.030 psd3.xmax = 0.030 psd3.ymin = -0.030 psd3.ymax = 0.030 psd3.set_AT(['0.0', ' 0.0', ' 7.999'], RELATIVE="guide_in") aperture1 = ISIS_SANS2d_Mantid.add_component("aperture1", "Slit") aperture1.xwidth = "A1w" aperture1.yheight = "A1h" aperture1.set_AT(['0', ' 0', ' 8.000'], RELATIVE="guide_in") lmonitor2 = ISIS_SANS2d_Mantid.add_component("lmonitor2", "L_monitor") lmonitor2.nL = 140 lmonitor2.filename = "\"lmonitor2.dat\"" lmonitor2.xmin = -0.0155 lmonitor2.xmax = 0.0155 lmonitor2.ymin = -0.0105 lmonitor2.ymax = 0.0105 lmonitor2.Lmin = 0.0 lmonitor2.Lmax = 17.0 lmonitor2.set_AT(['0.0', ' 0.0', ' 2.651'], RELATIVE="aperture1") S6 = ISIS_SANS2d_Mantid.add_component("S6", "Slit") S6.radius = "S6" S6.set_AT(['0', ' 0', ' 2.800'], RELATIVE="aperture1") sourceMantid = ISIS_SANS2d_Mantid.add_component("sourceMantid", "Arm") sourceMantid.set_AT(['0', ' 0', ' -18.087'], RELATIVE="S6") APERTURE2 = 
ISIS_SANS2d_Mantid.add_component("APERTURE2", "Slit") APERTURE2.radius = "A2" APERTURE2.set_AT(['0', ' 0', ' L1 '], RELATIVE="aperture1") lmon2 = ISIS_SANS2d_Mantid.add_component("lmon2", "L_monitor") lmon2.nL = 140 lmon2.filename = "\"Edet0.dat\"" lmon2.xmin = -0.0075 lmon2.xmax = 0.0075 lmon2.ymin = -0.0075 lmon2.ymax = 0.0075 lmon2.Lmin = 0.0 lmon2.Lmax = 17.0 lmon2.set_AT(['0.0', ' 0.0', ' 0.285'], RELATIVE="APERTURE2") psd4 = ISIS_SANS2d_Mantid.add_component("psd4", "PSD_monitor") psd4.nx = 100 psd4.ny = 100 psd4.filename = "\"psd4.dat\"" psd4.xmin = -0.0075 psd4.xmax = 0.0075 psd4.ymin = -0.0075 psd4.ymax = 0.0075 psd4.set_AT(['0.0', ' 0.0', ' 0.286'], RELATIVE="APERTURE2") psd5 = ISIS_SANS2d_Mantid.add_component("psd5", "PSD_monitor") psd5.nx = 100 psd5.ny = 100 psd5.filename = "\"psd5.dat\"" psd5.xmin = -0.0075 psd5.xmax = 0.0075 psd5.ymin = -0.0075 psd5.ymax = 0.0075 psd5.restore_neutron = 1 psd5.set_AT(['0.0', ' 0.0', ' 0.18'], RELATIVE="psd4") sampleMantid = ISIS_SANS2d_Mantid.add_component("sampleMantid", "SANS_benchmark2") sampleMantid.xwidth = 0.01 sampleMantid.yheight = 0.01 sampleMantid.zthick = 0.001 sampleMantid.model = "model_nr" sampleMantid.dsdw_inc = 0.0 sampleMantid.sc_aim = 1.0 sampleMantid.sans_aim = 1.00 sampleMantid.singlesp = 1.0 sampleMantid.append_EXTEND("if (!SCATTERED) ABSORB;") sampleMantid.set_SPLIT("") sampleMantid.set_AT(['0', ' 0', ' 0.2'], RELATIVE="psd4") detector = ISIS_SANS2d_Mantid.add_component("detector", "PSD_monitor") detector.nx = 200 detector.ny = 200 detector.filename = "\"PSD.dat\"" detector.xmin = -0.5 detector.xmax = 0.5 detector.ymin = -0.5 detector.ymax = 0.5 detector.restore_neutron = 1 detector.set_AT(['0', ' 0', ' 3.9'], RELATIVE="sampleMantid") nD_Mantid_1 = ISIS_SANS2d_Mantid.add_component("nD_Mantid_1", "Monitor_nD") nD_Mantid_1.xmin = -0.5 nD_Mantid_1.xmax = 0.5 nD_Mantid_1.ymin = -0.5 nD_Mantid_1.ymax = 0.5 nD_Mantid_1.restore_neutron = 1 nD_Mantid_1.options = "\"mantid square x limits=[-0.5 0.5] 
bins=128 y limits=[-0.5 0.5] bins=128,, neutron pixel t,, list all neutrons\"" nD_Mantid_1.filename = "\"bank01_events.dat\"" nD_Mantid_1.set_AT(['0', ' 0', ' 3.9'], RELATIVE="sampleMantid") lmon_post = ISIS_SANS2d_Mantid.add_component("lmon_post", "L_monitor") lmon_post.nL = 140 lmon_post.filename = "\"lmonitor_post.dat\"" lmon_post.xmin = -0.5 lmon_post.xmax = 0.5 lmon_post.ymin = -0.5 lmon_post.ymax = 0.5 lmon_post.Lmin = 0.0 lmon_post.Lmax = 17.0 lmon_post.restore_neutron = 1 lmon_post.set_AT(['0.0', ' 0.0', ' 3.9'], RELATIVE="sampleMantid") ``` # McStas Simulation ## Run McStas ``` data = ISIS_SANS2d_Mantid.run_full_instrument(foldername="data_1E7_nr15", parameters={"model_nr":15}, ncount=1E7) plot = plotter.make_sub_plot(data) ``` ## McStas IDF for Mantid ``` import subprocess cmd_idf = "mcdisplay.pl ISIS_SANS2d_Mantid_generated.instr --format=Mantid -n0 model_nr=5" subprocess.call([cmd_idf], shell=True) ``` ## Run McStas with NeXus ``` data_nexus = ISIS_SANS2d_Mantid.run_full_instrument(foldername="data_1E7_nexus_nr15", parameters={"model_nr":15}, ncount=1E7, custom_flags=" -c --format=NeXus ") print(data_nexus) ``` # Mantid ## Setup Mantid ``` from mantid.simpleapi import * ``` ## Load event data ``` # Load McStas event data from section 1 ws = Load(data_nexus) ``` ## Rebin TOF ``` sample = Rebin(ws[0],'1000', False) ``` ## Convert to lambda ``` sample = ConvertUnits(sample, 'Wavelength') ``` ## Rebin lamba ``` binning = '1.75,0.1,16.5' sample = Rebin(sample, binning) ``` ## Load McStas normalization spectrum ``` mcstasMonitor = CloneWorkspace(mtd['Edet0.dat_ws']) ``` ## Convert McStas data to Mantid histogram ``` mcstasMonitor = ConvertToHistogram(mcstasMonitor) mcstasMonitor = rebin(mcstasMonitor, binning) monitor = CreateWorkspace(mcstasMonitor.dataX(0), mcstasMonitor.dataY(0), mcstasMonitor.dataE(0), UnitX='Wavelength') ``` ## Setup q grid ``` binningMASK = '0.0035,0.001,0.4' ``` ## Reduce to I(q) and normailzed ``` reduced_norm = Q1D(sample, 
binningMASK, WavelengthAdj='monitor') ``` ## Save file ``` SaveAscii(InputWorkspace='reduced_norm', Filename='Mantid_reduced.dat', Separator='Tab', ScientificFormat=True) ``` ## Plot I(q) ``` %matplotlib notebook import matplotlib.pyplot as plt import numpy as np # Remove zero and inf data_q = np.column_stack((reduced_norm.readX(0)[:-1], reduced_norm.readY(0))) data_q = data_q[np.logical_not(np.isnan(data_q[:, 1]))] data_q = data_q[np.logical_not(np.isinf(data_q[:, 1]))] data_q = data_q[data_q[:, 1] != 0] plt.plot(data_q[:,0], data_q[:,1], 'k-', lw=2) plt.yscale('log') plt.xlabel('q [ 1/Å ]') plt.ylabel('I(q) [a.u.]') # Save data for SasView np.savetxt("Iq_cleaned.dat", data_q, header="<X> <Y>", comments='') # Save data for SasView with errors data_q_err = np.column_stack((data_q[:,0], data_q[:,1], 0.2*data_q[:,1])) np.savetxt("Iq_cleaned_err.dat", data_q_err, header="<X> <Y> <dY>", comments='') Iq_file = "Iq_cleaned_err.dat" ``` # SasView ## Setup Sasmodles and SasView ``` from sasmodels.core import load_model from sasmodels.bumps_model import Model, Experiment from sasmodels.data import load_data from bumps.names import * from bumps.fitters import fit from bumps.formatnum import format_uncertainty from bumps.formatnum import * from bumps.dream.stats import var_stats, format_vars ``` ## Setup fit function ### Without polydispersion ``` def fit_data(filename, fit_method, bumps_samples, bumps_burn): """ Run SasView SasModels fit :return: """ # Load data to fit test_data = load_data(filename) # Set up fit model kernel = load_model('sphere') pars = dict(radius=150, background=0.0, scale=1E-6, sld=7.0, sld_solvent=1.0) model = Model(kernel, **pars) # SET THE FITTING PARAMETERS model.radius.range(10, 200) model.scale.range(1E-7, 1E0) model.background.range(0, 1000) M = Experiment(data=test_data, model=model) problem = FitProblem(M) print("Initial chisq", problem.chisq_str()) result = fit(problem, method=fit_method, samples=bumps_samples, burn=bumps_burn) draw = 
result.state.draw(portion=1.0) all_vstats = var_stats(draw) return all_vstats ``` ### With polydispersion ``` def fit_data_pd(filename, fit_method, bumps_samples, bumps_burn): """ Run SasView SasModels fit :return: """ # Load data to fit test_data = load_data(filename) # Set up fit model kernel = load_model('sphere') pars = dict(radius=150, background=0.0, scale=1E-6, sld=7.0, sld_solvent=1.0, radius_pd=0.03, radius_pd_n=35, radius_pd_nsigma=3) model = Model(kernel, **pars) # SET THE FITTING PARAMETERS model.radius.range(10, 200) model.scale.range(1E-7, 1E1) model.background.range(0, 100) model.radius_pd.range(0.0, 0.1) M = Experiment(data=test_data, model=model) problem = FitProblem(M) print("Initial chisq", problem.chisq_str()) result = fit(problem, method=fit_method, samples=bumps_samples, burn=bumps_burn) draw = result.state.draw(portion=1.0) all_vstats = var_stats(draw) return all_vstats ``` ## Setup print all results function ``` def print_all_results(bumps_fit_result): """ Print parameters for fit :param bumps_fit_result: :return: """ for v in bumps_fit_result: print(v.label, v.mean, v.median, v.best, v.p68[0], v.p68[1], v.p95[0], v.p95[1]) ``` ## Setup print parameter result function ``` def print_results(bumps_fit_result, parmeter_to_print, conf_interval): """ SasModels "true" parmeter can be [v.mean, v.median, v.best] :param bumps_fit_result: :param parmeter_to_print: :param conf_interval: :return: """ for v in bumps_fit_result: if v.label == parmeter_to_print: if conf_interval: print(parmeter_to_print + ': ' + str(v.best) + ',' + str(0.5*(v.p95[1]-v.p95[0])) + '\n') else: print(parmeter_to_print + ': ' + v.best + ',' + 0.5*(v.p68[1]-v.p68[0]) + '\n') ``` ## Fit data ``` # Fit data from just made McStas-Mantid workflow. With polydisersion. fit_result = fit_data('Iq_cleaned_err.dat', 'dream', 10000, 100) # Fit data from just made McStas-Mantid workflow. With polydisersion. 
fit_result_pd = fit_data_pd('Iq_cleaned_err.dat', 'dream', 10000, 100) ``` ## Print results ``` print_results(fit_result, 'radius', True) print_results(fit_result, 'scale', True) print_results(fit_result, 'background', True) print_results(fit_result_pd, 'radius', True) print_results(fit_result_pd, 'scale', True) print_results(fit_result_pd, 'background', True) print_results(fit_result_pd, 'radius_pd', True) ``` ## Plot data and model - Without polydispersion ``` # Plot McStas-Mantid data and SasView model. Without polydispersion. from numpy import logspace, linspace from matplotlib import pyplot as plt from sasmodels.core import load_model from sasmodels.direct_model import call_kernel # Load data q2, Iq2, Iqerr2 = np.loadtxt('Iq_cleaned_err.dat', unpack=True, skiprows=2) model = load_model('sphere') q =linspace(0.001, 0.5, num=200) kernel = model.make_kernel([q]) Iq = call_kernel(kernel, dict(radius=148.18, scale=4.16e-05, background=5.78e-07, sld=7.0, sld_solvent=1.0)) plt.semilogy(q, Iq, label='SasView') plt.semilogy(q2, Iq2, label='McStas_Mantid') plt.xlabel('q (1/A)') plt.ylabel('I(q)') plt.title('McStas-Mantid: Sphere radius 150 Å. Without polydispersion.') plt.legend(loc='upper right') plt.show() ``` ## Plot data and model - With polydispersion ``` # Plot McStas-Mantid data and SasView model. With polydispersion to mimic instrument resolution effects. # Still same mono-disperse McStas scattering kernel. 
from numpy import logspace, linspace from matplotlib import pyplot as plt from sasmodels.core import load_model from sasmodels.direct_model import call_kernel # Load data q2, Iq2, Iqerr2 = np.loadtxt('Iq_cleaned_err.dat', unpack=True, skiprows=2) model = load_model('sphere') q =linspace(0.001, 0.5, num=200) kernel = model.make_kernel([q]) Iq = call_kernel(kernel, dict(radius=150.62, radius_pd=0.036,radius_pd_n=35 , scale=6.45e-05, background=2e-08, sld=7.0, sld_solvent=1.0)) plt.semilogy(q, Iq, label='SasView') plt.semilogy(q2, Iq2, label='McStas_Mantid') plt.xlabel('q (1/A)') plt.ylabel('I(q)') plt.title('McStas-Mantid: Sphere radius 150 Å. With ploydispersion.') plt.legend(loc='upper right') plt.show() ```
github_jupyter
<small><small><i> All the IPython Notebooks in **[Python Seaborn Module](https://github.com/milaan9/12_Python_Seaborn_Module)** lecture series by **[Dr. Milaan Parmar](https://www.linkedin.com/in/milaanparmar/)** are available @ **[GitHub](https://github.com/milaan9)** </i></small></small> <a href="https://colab.research.google.com/github/milaan9/12_Python_Seaborn_Module/blob/main/008_Seaborn_Distribution_Plots.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # What is Distribution Plots? - Flexibly plot a univariate distribution of observations. - This function combines the matplotlib hist function (with automatic calculation of a good default bin size) with the seaborn **`kdeplot()`** and **`rugplot()`** functions. It can also fit scipy.stats distributions and plot the estimated PDF over the data. ### Let's discuss some plots that allow us to visualize the distribution of a dataset. These plots are: - **`distplot()`** - **`jointplot()`** - **`pairplot()`** - **`rugplot()`** - **`kdeplot()`** ``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd %matplotlib inline num = np.random.randn(150) sns.distplot(num,color ='green') label_dist = pd.Series(num,name = " Variable x") sns.distplot(label_dist,color = "red") # Plot the distribution with a kenel density. estimate and rug plot: sns.distplot(label_dist,hist = False,color = "red") # Plot the distribution with a kenel density estimate and rug plot: sns.distplot(label_dist,rug = True,hist = False,color = "red") # Plot the distribution with a histogram and maximum likelihood gaussian distribution fit: from scipy.stats import norm sns.distplot(label_dist, fit=norm, kde=False) ``` ### Plot the distribution on the vertical axis: ``` sns.distplot(label_dist, vertical =True) ``` ## Let's implement with dataset ### Data Seaborn comes with built-in data sets! 
``` tips = sns.load_dataset('tips') tips.head() ``` ### 1 `distplot()` The **`distplot()`** shows the distribution of a univariate set of observations. ``` sns.distplot(tips['total_bill']) # Safe to ignore warnings sns.distplot(tips['total_bill'],kde=False,bins=30) ``` ### 2 `jointplot()` `jointplot()` allows you to basically match up two distplots for bivariate data. With your choice of what kind parameter to compare with: - `scatter` - `reg` - `resid` - `kde` - `hex` ``` # 'scatter' sns.jointplot(x='total_bill',y='tip',data=tips,kind='scatter') # 'hex' sns.jointplot(x='total_bill',y='tip',data=tips,kind='hex') # 'reg' sns.jointplot(x='total_bill',y='tip',data=tips,kind='reg') ``` ### 3 `pairplot()` `pairplot()` will plot pairwise relationships across an entire dataframe (for the numerical columns) and supports a color hue argument (for categorical columns). ``` sns.pairplot(tips) sns.pairplot(tips,hue='sex',palette='coolwarm') ``` ### 4 `rugplot()` `rugplots()` are actually a very simple concept, they just draw a dash mark for every point on a univariate distribution. They are the building block of a KDE plot: ``` sns.rugplot(tips['total_bill']) ``` ### 5 `kdeplot()` `kdeplots()` are **[Kernel Density Estimation plots](http://en.wikipedia.org/wiki/Kernel_density_estimation#Practical_estimation_of_the_bandwidth)**. These KDE plots replace every single observation with a Gaussian (Normal) distribution centered around that value. For example: ``` # Don't worry about understanding this code! 
# It's just for the diagram below import numpy as np import matplotlib.pyplot as plt from scipy import stats #Create dataset dataset = np.random.randn(25) # Create another rugplot sns.rugplot(dataset); # Set up the x-axis for the plot x_min = dataset.min() - 2 x_max = dataset.max() + 2 # 100 equally spaced points from x_min to x_max x_axis = np.linspace(x_min,x_max,100) # Set up the bandwidth, for info on this: url = 'http://en.wikipedia.org/wiki/Kernel_density_estimation#Practical_estimation_of_the_bandwidth' bandwidth = ((4*dataset.std()**5)/(3*len(dataset)))**.2 # Create an empty kernel list kernel_list = [] # Plot each basis function for data_point in dataset: # Create a kernel for each point and append to list kernel = stats.norm(data_point,bandwidth).pdf(x_axis) kernel_list.append(kernel) #Scale for plotting kernel = kernel / kernel.max() kernel = kernel * .4 plt.plot(x_axis,kernel,color = 'grey',alpha=0.5) plt.ylim(0,1) # To get the kde plot we can sum these basis functions. # Plot the sum of the basis function sum_of_kde = np.sum(kernel_list,axis=0) # Plot figure fig = plt.plot(x_axis,sum_of_kde,color='indianred') # Add the initial rugplot sns.rugplot(dataset,c = 'indianred') # Get rid of y-tick marks plt.yticks([]) # Set title plt.suptitle("Sum of the Basis Functions") sns.kdeplot(tips['total_bill']) sns.rugplot(tips['total_bill']) sns.kdeplot(tips['tip']) sns.rugplot(tips['tip']) ``` Alright! Since we've finished with Distribution Plots in our next lecture where we shall be discussing few other plots which deal quite heavily with **[Categorical Data Plots](https://github.com/milaan9/12_Python_Seaborn_Module/blob/main/009_Seaborn_Categorical_Swarm_Plot.ipynb)**, that is commonly seen across.
github_jupyter
# Basic Logistic Regression Vamos a considerar los datos de [Kaggle](https://www.kaggle.com/c/tabular-playground-series-jun-2021/code?competitionId=26480), para ajustar un modelo de regresión logística que haga predicciones sobre la categoría. Ideas para este ajuste son de [aquí](https://www.kaggle.com/whenthetidegoesout/june-playground). ## Bibliotecas básicas ``` import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn import preprocessing import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score, classification_report, plot_confusion_matrix from sklearn.neighbors import KNeighborsClassifier import random from sklearn.linear_model import LogisticRegression ``` ## Carga de datos Para ejecutar este comando es necesario dar la ubicación del archivo `train.csv` ``` dataf = pd.read_csv('tabular-playground-series-jun-2021/train.csv') dataf ``` ## Análisis básico ``` dataf.head() dataf.info() dataf.isna().sum() ``` Tenemos que: - Todas las columnas de variables `X` son numéricas - La columna `y` es del tipo `object` - No tenemos nulos Ahora analicemos el número de observaciones por categoría ``` viz_a = dataf.groupby('target').count()['id'] viz_a.plot(kind='bar', title='Count of Tartgets', figsize=(15, 12)) dataf.groupby('target').count()['id'] ``` ## Mini sample y Separación train/test Tenemos `200000` renglones, así que para agilizar los cálculos usaremos una mini muestra de los datos ``` dataf = pd.read_csv('tabular-playground-series-jun-2021/train.csv') size_of_mini_sample = 5000 dataf = dataf.sample(n=size_of_mini_sample, random_state=42) # también se podría usar una fracción con 
`frac=0.4` y = dataf.target X = dataf.drop('target', axis='columns') X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,random_state=42) sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) knn = KNeighborsClassifier(n_neighbors = 17) knn.fit(X_train, y_train) y_pred_train = knn.predict(X_train) y_pred_test = knn.predict(X_test) print("Train f1 Score: ",f1_score(y_train, y_pred_train,average='micro')) print("Test f1 Score: ",f1_score(y_test, y_pred_test,average='micro')) print(classification_report(y_test, y_pred_test)) acc = [] # Will take some time from sklearn import metrics for i in range(1,40): neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train) yhat = neigh.predict(X_test) acc.append(metrics.accuracy_score(y_test, yhat)) plt.figure(figsize=(10,6)) plt.plot(range(1,40),acc,color = 'blue',linestyle='dashed', marker='o',markerfacecolor='red', markersize=10) plt.title('accuracy vs. K Value') plt.xlabel('K') plt.ylabel('Accuracy') print("Maximum accuracy:-",max(acc),"at K =",acc.index(max(acc))) acc = [] # Will take some time from sklearn import metrics for i in range(41,70): neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train) yhat = neigh.predict(X_test) acc.append(metrics.accuracy_score(y_test, yhat)) plt.figure(figsize=(10,6)) plt.plot(range(41,70),acc,color = 'blue',linestyle='dashed', marker='o',markerfacecolor='red', markersize=10) plt.title('accuracy vs. 
K Value') plt.xlabel('K') plt.ylabel('Accuracy') print("Maximum accuracy:-",max(acc),"at K =",acc.index(max(acc))) ``` # Logistic regression ``` dataf = pd.read_csv('tabular-playground-series-jun-2021/train.csv') # size_of_mini_sample = 10000 # dataf = dataf.sample(n=size_of_mini_sample, random_state=42) # también se podría usar una fracción con `frac=0.4` y = dataf.target X = dataf.drop('target', axis='columns') y X #X_train, X_test, y_train, y_test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) logreg = LogisticRegression() logreg.fit(X_train, y_train) y_pred_train = logreg.predict(X_train) y_pred_test = logreg.predict(X_test) print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test))) print("Train f1 Score: ",f1_score(y_train, y_pred_train,average='micro')) print("Test f1 Score: ",f1_score(y_test, y_pred_test,average='micro')) print(classification_report(y_test, y_pred_test)) pca = PCA(n_components=74) pca.fit(X) X_pca = pca.transform(X) # Calculate cumulative explained variance across all PCs cum_exp_var = [] var_exp = 0 for i in pca.explained_variance_ratio_: var_exp += i cum_exp_var.append(var_exp) # Plot cumulative explained variance for all PCs fig, ax = plt.subplots(figsize=(8,6)) ax.bar(range(1,75), cum_exp_var) ax.set_xlabel('# Principal Components') ax.set_ylabel('% Cumulative Variance Explained'); # Se cargan las librerías que se van a utilizar en ambos ejemplos import math import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn from sklearn.impute import SimpleImputer from sklearn.compose import make_column_transformer from sklearn.model_selection import cross_val_score from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures # <------ library to perform Polynomial Regression from sklearn.linear_model import Ridge from 
sklearn.linear_model import LinearRegression from sklearn import metrics from sklearn.metrics import mean_squared_error from sklearn.preprocessing import scale from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from sklearn.linear_model import Lasso from sklearn.model_selection import GridSearchCV from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder pd.set_option('display.max_rows', 90) # by default is 10, if change to None print ALL pd.set_option('display.max_columns', 90) # by default is 10, if change to None print ALL ## 1) EXTRAER DATOS # Los datos pueden encontrarse en diferentes formatos, en nuestro caso están en formato csv. # Se carga la base de datos train = pd.read_csv('train.csv') #Se encuentra en la misma carpeta que el jupyter notebook test = pd.read_csv('test.csv') #Se encuentra en la misma carpeta que el jupyter notebook print(train.shape) print(test.shape) train ``` # Elimination of some columns Vamos a eliminar las columnas que tienen más del 50% de sus valores nulos en train ``` col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2] col_plus_50percent_null ``` Observemos que también hay casi las mismas columnas en test ``` test.isnull().sum()[test.isnull().sum()>test.shape[0]/2] ``` Entonces nos queda ``` features_drop = ['PoolQC','MiscFeature','Alley','Fence'] train = train.drop(features_drop, axis=1) test = test.drop(features_drop, axis=1) ``` Comprobemos que ya no tenemos esas variables ``` col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2] col_plus_50percent_null test.isnull().sum()[test.isnull().sum()>test.shape[0]/2] ``` # Separación de variables Separemos las variables en `X_train`, `X_test`, `y_train`, `y_test`, al igual que elijamos que 
columnas son numericas, ordinales y nominales ``` numerical = train.select_dtypes(include=np.number).columns.tolist() numerical.remove('Id') numerical.remove('SalePrice') nominal = train.select_dtypes(exclude=np.number).columns.tolist() # ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", # "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", # "KitchenQual", "Functional", "GarageCond", "PavedDrive"] ordinal = [] X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_REAL_test = test[nominal + ordinal + numerical] ``` # Pipelines auxiliares Para separar mejor el procesamiento de nuestros datos, utilizamos tres pipelines auxiliares ``` # Pipeline datos ordinales ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) # Pipeline datos nominales nominal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) # Pipeline datos numéricos numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) # Pegado de los tres pipelines preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ``` Finalmente agregamos todo en un solo pipeline ``` # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) # ML_model = Lasso(alpha=190) # ML_model = Ridge(alpha=20) ML_model = LinearRegression() complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), ("estimator", ML_model) ]) complete_pipeline ``` # Predicciones ``` complete_pipeline.fit(X_train, y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') 
print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score:', r2_score(y_test, y_pred)) p1 = max(max(y_pred), max(y_test)) p2 = min(min(y_pred), min(y_test)) plt.plot([p1, p2], [p1, p2], 'b-') plt.scatter(y_test,y_pred) ``` # Generación de archivo para Kaggle ``` y_REAL_test = complete_pipeline.predict(X_REAL_test) pred=pd.DataFrame(y_REAL_test) sub_df=pd.read_csv('sample_submission.csv') datasets=pd.concat([sub_df['Id'],pred],axis=1) datasets.columns=['Id','SalePrice'] datasets.to_csv('sample_submission.csv',index=False) ``` Para subir el archivo es [aquí](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview/evaluation) ``` # FUNCIONA PIPELINE LASSO WITH numerical = train.select_dtypes(include=np.number).columns.tolist() numerical.remove('Id') numerical.remove('SalePrice') nominal = train.select_dtypes(exclude=np.number).columns.tolist() ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "KitchenQual", "Functional", "GarageCond", "PavedDrive"] ordinal = [] X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) nominal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) # here we are going to instantiate a ColumnTransformer object with a list of tuples # each of which has a the name of the preprocessor # the transformation pipeline (could be a transformer) # and the list 
of column names we wish to transform preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ## If you want to test this pipeline run the following code # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) ML_model = Lasso(alpha=1) ML_model = Ridge(alpha=.1) # ML_model = LinearRegression() complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), # ("scaler", StandardScaler()), # No mejora la estimación escalando # ('poly_features', PolynomialFeatures(degree=2)), # empeora con polynomal features ("estimator", ML_model) ]) complete_pipeline.fit(X_train, y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) p1 = max(max(y_pred), max(y_test)) p2 = min(min(y_pred), min(y_test)) plt.plot([p1, p2], [p1, p2], 'b-') plt.scatter(y_test,y_pred) ``` # ALL IN ONE ``` # FUNCIONA PIPELINE LASSO WITH numerical = train.select_dtypes(include=np.number).columns.tolist() numerical.remove('Id') numerical.remove('SalePrice') nominal = train.select_dtypes(exclude=np.number).columns.tolist() ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "KitchenQual", "Functional", "GarageCond", "PavedDrive"] ordinal = [] X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) nominal_pipeline = Pipeline([ ("imputer", 
SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) # here we are going to instantiate a ColumnTransformer object with a list of tuples # each of which has a the name of the preprocessor # the transformation pipeline (could be a transformer) # and the list of column names we wish to transform preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ## If you want to test this pipeline run the following code # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) ML_model = Lasso(alpha=190) ML_model = LinearRegression() complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), # ("scaler", StandardScaler()), # No mejora la estimación escalando # ('poly_features', PolynomialFeatures(degree=2)), ("estimator", LinearRegression()) ]) complete_pipeline.fit(X_train, y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) p1 = max(max(y_pred), max(y_test)) p2 = min(min(y_pred), min(y_test)) plt.plot([p1, p2], [p1, p2], 'b-') plt.scatter(y_test,y_pred) aux = ct.fit_transform(X) aux = pd.df(aux) preprocessed_features = preprocessing_pipeline.fit_transform(X_train) preprocessed_features from sklearn.linear_model import Lasso from sklearn.model_selection import GridSearchCV lasso=Lasso() parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]} lasso_regressor=GridSearchCV(lasso,parameters,scoring='neg_mean_squared_error',cv=5) 
lasso_regressor.fit(preprocessing_pipeline.fit_transform(X_train),y_train) print(lasso_regressor.best_params_) print(lasso_regressor.best_score_) ``` # Encontrando alpha de Lasso (alpha = 180) ``` parameters={'alpha':[100,150,170,180,190,200,220,250,300]} ML_model=Lasso() grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5) grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train) # Convert the results of CV into a dataframe results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']] results.sort_values('rank_test_score') ``` # Encontrando alpha de Ridge (alpha = 20) ``` parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]} ML_model=Ridge() grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5) grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train) # Convert the results of CV into a dataframe results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']] results.sort_values('rank_test_score') ``` ## Numeric missing values # One Hot Encoder ``` model # https://salvatore-raieli.medium.com/a-complete-guide-to-linear-regression-using-gene-expression-data-regularization-f980ba6b11f7 model = Lasso(alpha = 180) model.fit(preprocessing_pipeline.fit_transform(X_train), y_train) y_pred = complete_pipeline.predict(X_test) coefs = model.coef_.flatten() names = X_train.columns genes = list(zip(names, coefs)) feature =pd.DataFrame(genes, columns = ["genes", "coefs"]) feature0 = feature.loc[(feature!=0).any(axis=1)] feature0 = feature[(feature != 0).all(1)] feature0.shape, feature.shape print(feature0.shape, feature.shape) coefs =feature0.sort_values(by=['coefs']) plt.figure(figsize=(20, 15)) g = sns.barplot(x="genes", y="coefs", data=coefs, color= "lightblue") g.figsize=(16,10) plt.xticks(rotation=45) feature0 # FUNCIONA LASSO X = train[['MSSubClass', 'LotArea', 'OverallQual']] y = train['SalePrice'] X_train, X_test, 
y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) model_lasso = Lasso(alpha=0.01) model_lasso.fit(X_train, y_train) y_pred= model_lasso.predict(X_test) print('Predictions with Polynomial Regression') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) # LASSO PIPELINE FUNCIONA X = train[['MSSubClass','LotArea','OverallQual','LotFrontage']]#LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) imp_mean = SimpleImputer(missing_values =np.nan, strategy='mean') columns_imp_mean = ['LotFrontage'] scaler = StandardScaler() column_trans = make_column_transformer( (imp_mean,columns_imp_mean), remainder = 'passthrough') ML_model = Lasso(alpha=0.01) pipe = make_pipeline(column_trans, ML_model) print(cross_val_score(pipe,X_train,y_train,cv=5)) pipe.fit(X_train,y_train) y_pred= pipe.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) # primero hace la división de cross-validation y después hace el pipeline, # La diferencia de hacerlo así es que entonces cuando toma promedios para calcular como llenar los missing values, # estos promedios son con respecto al cross-validation cross_val_score(pipe,X,y,cv=5,scoring='accuracy').mean() # FUNCIONA PIPELINE LASSO WITH nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood", "Condition1", "BldgType", "RoofStyle", "Foundation", "CentralAir", "SaleType", "SaleCondition"] ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", "ExterQual", 
"ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "KitchenQual", "Functional", "GarageCond", "PavedDrive"] numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea", "OpenPorchSF"] X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) nominal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) # here we are going to instantiate a ColumnTransformer object with a list of tuples # each of which has a the name of the preprocessor # the transformation pipeline (could be a transformer) # and the list of column names we wish to transform preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ## If you want to test this pipeline run the following code # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), ("scaler", StandardScaler()), # No mejora la estimación escalando ("estimator", LinearRegression()) ]) complete_pipeline.fit(X_train, y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) # FUNCIONA PIPELINE 
LASSO WITH nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood", "Condition1", "BldgType", "RoofStyle", "Foundation", "CentralAir", "SaleType", "SaleCondition"] ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "KitchenQual", "Functional", "GarageCond", "PavedDrive"] numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea", "OpenPorchSF"] X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) nominal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) # here we are going to instantiate a ColumnTransformer object with a list of tuples # each of which has a the name of the preprocessor # the transformation pipeline (could be a transformer) # and the list of column names we wish to transform preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ## If you want to test this pipeline run the following code # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) from sklearn.linear_model import LinearRegression complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), ("estimator", LinearRegression()) ]) complete_pipeline.fit(X_train, y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', 
metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) ``` *A general rule of thumb: drop a dummy-encoded column if using a linear-based model, and do not drop it if using a tree-based model* ``` true_value = y_test predicted_value = y_pred plt.figure(figsize=(10,10)) plt.scatter(true_value, predicted_value, c='crimson') # plt.yscale('log') # plt.xscale('log') p1 = max(max(predicted_value), max(true_value)) p2 = min(min(predicted_value), min(true_value)) plt.plot([p1, p2], [p1, p2], 'b-') plt.xlabel('True Values', fontsize=15) plt.ylabel('Predictions', fontsize=15) plt.axis('equal') plt.show() ``` The next cell is from [here](https://mahmoudyusof.github.io/general/scikit-learn-pipelines/) ``` # The next cell is from https://mahmoudyusof.github.io/general/scikit-learn-pipelines/ train_df = pd.read_csv("train.csv") test_df = pd.read_csv("test.csv") ## let's create a validation set from the training set msk = np.random.rand(len(train_df)) < 0.8 val_df = train_df[~msk] train_df = train_df[msk] nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood", "Condition1", "BldgType", "RoofStyle", "Foundation", "CentralAir", "SaleType", "SaleCondition"] ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "KitchenQual", "Functional", "GarageCond", "PavedDrive"] numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea", "OpenPorchSF"] train_features = train_df[nominal + ordinal + numerical] train_label = train_df["SalePrice"] val_features = val_df[nominal + ordinal + numerical] val_label = val_df["SalePrice"] test_features = test_df[nominal + ordinal + numerical] from sklearn.pipeline import Pipeline from 
sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder ordinal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OrdinalEncoder()) ]) nominal_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")), ("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore")) ]) numerical_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="mean")), ("scaler", StandardScaler()) ]) from sklearn.compose import ColumnTransformer # here we are going to instantiate a ColumnTransformer object with a list of tuples # each of which has a the name of the preprocessor # the transformation pipeline (could be a transformer) # and the list of column names we wish to transform preprocessing_pipeline = ColumnTransformer([ ("nominal_preprocessor", nominal_pipeline, nominal), ("ordinal_preprocessor", ordinal_pipeline, ordinal), ("numerical_preprocessor", numerical_pipeline, numerical) ]) ## If you want to test this pipeline run the following code # preprocessed_features = preprocessing_pipeline.fit_transform(train_features) from sklearn.linear_model import LinearRegression complete_pipeline = Pipeline([ ("preprocessor", preprocessing_pipeline), ("estimator", LinearRegression()) ]) complete_pipeline.fit(train_features, train_label) # score = complete_pipeline.score(val_features, val_label) # print(score) # predictions = complete_pipeline.predict(test_features) # pipe = make_pipeline(column_trans, ML_model) # print(cross_val_score(complete_pipeline,X_train,y_train,cv=5)) # pipe.fit(X_train,y_train) y_pred = complete_pipeline.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) # LASSO PIPELINE FUNCIONA X = 
train[['MSSubClass','LotArea','OverallQual','LotFrontage']]#LotFrontage y MasVnrType tiene NaNs y = train['SalePrice'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) imp_mean = SimpleImputer(missing_values =np.nan, strategy='mean') columns_imp_mean = ['LotFrontage'] scaler = StandardScaler() column_trans = make_column_transformer( (imp_mean,columns_imp_mean), remainder = 'passthrough') ML_model = Lasso(alpha=0.01) pipe = make_pipeline(column_trans, ML_model) print(cross_val_score(pipe,X_train,y_train,cv=5)) pipe.fit(X_train,y_train) y_pred= pipe.predict(X_test) print('ERRORS OF PREDICTIONS') print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('r2_score', r2_score(y_test, y_pred)) plt.scatter(y_test,y_pred) ```
github_jupyter
# Object Detection using Haar feature-based cascade classifiers in openCV ``` import numpy as np import cv2 import matplotlib.pyplot as plt %matplotlib inline ``` Object Detection using Haar feature-based cascade classifiers is an effective object detection method proposed by Paul Viola and Michael Jones in their paper, "Rapid Object Detection using a Boosted Cascade of Simple Features" in 2001. It is a machine learning based approach where a cascade function is trained from a lot of positive and negative images. It is then used to detect objects in other images. Here we will work with face detection. Initially, the algorithm needs a lot of positive images (images of faces) and negative images (images without faces) to train the classifier. Then we need to extract features from it. For this, Haar features shown in the below image are used. They are just like our convolutional kernel. Each feature is a single value obtained by subtracting the sum of pixels under the white rectangle from the sum of pixels under the black rectangle. ``` plt.imshow(plt.imread("haar_features.jpg")) ``` Now, all possible sizes and locations of each kernel are used to calculate lots of features. (Just imagine how much computation it needs! Even a 24x24 window results in over 160000 features). For each feature calculation, we need to find the sum of the pixels under white and black rectangles. To solve this, they introduced the integral image. However large your image, it reduces the calculations for a given pixel to an operation involving just four pixels. Nice, isn't it? It makes things super-fast. But among all these features we calculated, most of them are irrelevant. For example, consider the image below. The top row shows two good features. The first feature selected seems to focus on the property that the region of the eyes is often darker than the region of the nose and cheeks. The second feature selected relies on the property that the eyes are darker than the bridge of the nose. 
But the same windows applied to cheeks or any other place is irrelevant. So how do we select the best features out of 160000+ features? It is achieved by Adaboost. ``` plt.imshow(plt.imread("haar.png")) ``` For this, we apply each and every feature on all the training images. For each feature, it finds the best threshold which will classify the faces to positive and negative. Obviously, there will be errors or misclassifications. We select the features with minimum error rate, which means they are the features that most accurately classify the face and non-face images. (The process is not as simple as this. Each image is given an equal weight in the beginning. After each classification, weights of misclassified images are increased. Then the same process is done. New error rates are calculated. Also new weights. The process is continued until the required accuracy or error rate is achieved or the required number of features are found). The final classifier is a weighted sum of these weak classifiers. It is called weak because it alone can't classify the image, but together with others forms a strong classifier. The paper says even 200 features provide detection with 95% accuracy. Their final setup had around 6000 features. (Imagine a reduction from 160000+ features to 6000 features. That is a big gain). So now you take an image. Take each 24x24 window. Apply 6000 features to it. Check if it is face or not. Wow.. Isn't it a little inefficient and time consuming? Yes, it is. The authors have a good solution for that. In an image, most of the image is non-face region. So it is a better idea to have a simple method to check if a window is not a face region. If it is not, discard it in a single shot, and don't process it again. Instead, focus on regions where there can be a face. This way, we spend more time checking possible face regions. For this they introduced the concept of Cascade of Classifiers. 
Instead of applying all 6000 features on a window, the features are grouped into different stages of classifiers and applied one-by-one. (Normally the first few stages will contain far fewer features). If a window fails the first stage, discard it. We don't consider the remaining features on it. If it passes, apply the second stage of features and continue the process. The window which passes all stages is a face region. How is that for a plan! The authors' detector had 6000+ features with 38 stages with 1, 10, 25, 25 and 50 features in the first five stages. (The two features in the above image are actually obtained as the best two features from Adaboost). According to the authors, on average 10 features out of 6000+ are evaluated per sub-window. So this is a simple intuitive explanation of how Viola-Jones face detection works. Read the paper for more details or check out the references in the Additional Resources section. OpenCV provides a training method (see Cascade Classifier Training) or pretrained models, that can be read using the cv::CascadeClassifier::load method. The pretrained models are located in the data folder in the OpenCV installation or can be found here. The following code example will use pretrained Haar cascade models to detect faces and eyes in an image. First, a cv::CascadeClassifier is created and the necessary XML file is loaded using the cv::CascadeClassifier::load method. Afterwards, the detection is done using the cv::CascadeClassifier::detectMultiScale method, which returns boundary rectangles for the detected faces or eyes. 
### first download required XML files from https://github.com/opencv/opencv/tree/master/data/haarcascades ``` import numpy as np import cv2 face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') img = cv2.imread('asso-myron-WWI5OxDXdVY-unsplash.jpg') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x,y,w,h) in faces: img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex,ey,ew,eh) in eyes: cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) plt.figure(figsize=(10,10)) plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB)) cv2.waitKey(0) cv2.destroyAllWindows() ``` ### references: - https://ieeexplore.ieee.org/abstract/document/990517 Paul Viola and Michael J. Jones. Robust real-time face detection. International Journal of Computer Vision, 57(2):137–154, 2004. - https://docs.opencv.org/master/db/d28/tutorial_cascade_classifier.html - https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_objdetect/py_face_detection/py_face_detection.html#face-detection - https://unsplash.com/photos/WWI5OxDXdVY image credits
github_jupyter
# OpenPredict API examples Example calls to the OpenPredict Smart API accessible at https://openpredict.semanticscience.org ## Get predictions for a list of MONDO diseases Example to convert multiple MONDO IDs to OMIM with Translator NodeNormalization API, then query the OpenPredict API, for predicted drugs. ``` import json import requests max_predictions_returned = 5 mondo_ids_list = ["MONDO:0018874", "MONDO:0008734", "MONDO:0004056", "MONDO:0005499", "MONDO:0006256", "MONDO:0006143", "MONDO:0019087", "MONDO:0002271", "MONDO:0003093", "MONDO:0018177", "MONDO:0010150", "MONDO:0017885", "MONDO:0005005", "MONDO:0017884", "MONDO:0007256", "MONDO:0005061", "MONDO:0005097", "MONDO:0018905", "MONDO:0005065", "MONDO:0006046", "MONDO:0006047", "MONDO:0004974", "MONDO:0005082", "MONDO:0002169", "MONDO:0005089", "MONDO:0005012", "MONDO:0005036", "MONDO:0010108", "MONDO:0006456", "MONDO:0015075", "MONDO:0006485", "MONDO:0000553", "MONDO:0006486", "MONDO:0004967", "MONDO:0005170", "MONDO:0005072", "MONDO:0008433", "MONDO:0004163", "MONDO:0000554", "MONDO:0005580", "MONDO:0004093", "MONDO:0000448"] # First query Translator NodeNormalization API to convert MONDO IDs to OMIM IDs resolve_curies = requests.get('https://nodenormalization-sri.renci.org/get_normalized_nodes', params={'curie': mondo_ids_list}) # Get corresponding OMIM IDs for MONDO IDs if match resp = resolve_curies.json() for mondo_id, alt_ids in resp.items(): for alt_id in alt_ids['equivalent_identifiers']: if str(alt_id['identifier']).startswith('OMIM'): print('🗺 Mapped ' + mondo_id + ' - "' + alt_ids['id']['label'] + '" to ' + alt_id['identifier']) print('🔎 Searching drug predictions for ' + alt_id['identifier']) # Query OpenPredict API with OMIM IDs using /predict call (faster than TRAPI /query) get_predictions = requests.get('https://openpredict.semanticscience.org/predict', params={ 'disease_id': alt_id['identifier'], 'n_results': max_predictions_returned }).json() # Display predictions found if 'hits' in 
get_predictions: print('🔮 Drug predictions found:') for prediction in get_predictions['hits']: # print(prediction) predicted_label = prediction['id'] if 'label' in prediction.keys(): predicted_label = predicted_label + ' - ' + prediction['label'] print(predicted_label + ' - score: ' + str(prediction['score'])) else: print('❌️ No drug prediction found') print('') ``` ## TRAPI `/query` Send a query described using the Translator Reasoner API standard to OpenPredict: ``` data = json.dumps({ "message": { "n_results": "2", "query_graph": { "edges": { "e01": { "object": "n1", "predicate": "biolink:treated_by", "subject": "n0" } }, "nodes": { "n0": { "category": "biolink:Drug", "id": "DRUGBANK:DB00394" }, "n1": { "category": "biolink:Disease" } } }, "query_options": { "min_score": "0.5" } } }) headers = {'Content-type': 'application/json'} predict_drugs = requests.post('https://openpredict.semanticscience.org/query', data=data, headers=headers) print(json.dumps( predict_drugs.json(), indent = 2)) ``` ## Simpler `/predict` operation This operation allow to quickly get predicted drug/disease for the given drug/disease (same as the TRAPI operation, but simpler to query) Example providing a `disease_id`, we can also provide a `drug_id` in the params: `{'drug_id':"DRUGBANK:DB00394"}` ``` # For a disease predict_diseases = requests.get('https://openpredict.semanticscience.org/predict', params={'disease_id':"OMIM:268220", 'model_id':'openpredict-baseline-omim-drugbank', 'n_results': 2}) print(json.dumps( predict_diseases.json(), indent = 2)) ``` ## Translator identifiers resolution Example query to convert OMIM ID to MONDO ID using the Translator NodeNormalization API: Translator ID preferences: * [For Disease](https://github.com/biolink/biolink-model/blob/master/biolink-model.yaml#L2853) * [Chemical substance](https://github.com/biolink/biolink-model/blob/master/biolink-model.yaml#L3097) * [Drug](https://github.com/biolink/biolink-model/blob/master/biolink-model.yaml#L3119) 
Resolve CURIEs using Translator API services: ``` resolve_curies = requests.get('https://nodenormalization-sri.renci.org/get_normalized_nodes', params={'curie':["OMIM:268220", "DRUGBANK:DB00394", "HP:0007354", "HGNC:613"]}) print( json.dumps( resolve_curies.json(), indent = 2)) ```
github_jupyter
``` import open3d as o3d import numpy as np import matplotlib.pyplot as plt import copy import os import sys # only needed for tutorial, monkey patches visualization sys.path.append('..') import open3d_tutorial as o3dtut # change to True if you want to interact with the visualization windows o3dtut.interactive = not "CI" in os.environ ``` # Octree An **octree** is a tree data structure where each internal node has eight children. Octrees are commonly used for spatial partitioning of 3D point clouds. Non-empty leaf nodes of an octree contain one or more points that fall within the same spatial subdivision. Octrees are a useful description of 3D space and can be used to quickly find nearby points. Open3D has the geometry type `Octree` that can be used to create, search, and traverse octrees with a user-specified maximum tree depth, `max_depth`. ## From point cloud An octree can be constructed from a point cloud using the method `convert_from_point_cloud`. Each point is inserted into the tree by following the path from the root node to the appropriate leaf node at depth `max_depth`. As the tree depth increases, internal (and eventually leaf) nodes represent a smaller partition of 3D space. If the point cloud has color, the corresponding leaf node takes the color of the last inserted point. The `size_expand` parameter increases the size of the root octree node so it is slightly bigger than the original point cloud bounds to accommodate all points. 
``` print('input') N = 2000 armadillo = o3d.data.ArmadilloMesh() mesh = o3d.io.read_triangle_mesh(armadillo.path) pcd = mesh.sample_points_poisson_disk(N) # fit to unit cube pcd.scale(1 / np.max(pcd.get_max_bound() - pcd.get_min_bound()), center=pcd.get_center()) pcd.colors = o3d.utility.Vector3dVector(np.random.uniform(0, 1, size=(N, 3))) o3d.visualization.draw_geometries([pcd]) print('octree division') octree = o3d.geometry.Octree(max_depth=4) octree.convert_from_point_cloud(pcd, size_expand=0.01) o3d.visualization.draw_geometries([octree]) ``` ## From voxel grid An octree can also be constructed from an Open3D `VoxelGrid` geometry using the method `create_from_voxel_grid`. Each voxel of the input `VoxelGrid` is treated as a point in 3D space with coordinates corresponding to the origin of the voxel. Each leaf node takes the color of its corresponding voxel. ``` print('voxelization') voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.05) o3d.visualization.draw_geometries([voxel_grid]) print('octree division') octree = o3d.geometry.Octree(max_depth=4) octree.create_from_voxel_grid(voxel_grid) o3d.visualization.draw_geometries([octree]) ``` Additionally, an `Octree` can be converted to a `VoxelGrid` with `to_voxel_grid`. ## Traversal An octree can be traversed, which can be useful for searching or processing subsections of 3D geometry. By providing the `traverse` method with a callback, each time a node (internal or leaf) is visited, additional processing can be performed. In the following example, an early stopping criterion is used to only process internal/leaf nodes with more than a certain number of points. This early stopping ability can be used to efficiently process spatial regions meeting certain conditions. 
``` def f_traverse(node, node_info): early_stop = False if isinstance(node, o3d.geometry.OctreeInternalNode): if isinstance(node, o3d.geometry.OctreeInternalPointNode): n = 0 for child in node.children: if child is not None: n += 1 print( "{}{}: Internal node at depth {} has {} children and {} points ({})" .format(' ' * node_info.depth, node_info.child_index, node_info.depth, n, len(node.indices), node_info.origin)) # we only want to process nodes / spatial regions with enough points early_stop = len(node.indices) < 250 elif isinstance(node, o3d.geometry.OctreeLeafNode): if isinstance(node, o3d.geometry.OctreePointColorLeafNode): print("{}{}: Leaf node at depth {} has {} points with origin {}". format(' ' * node_info.depth, node_info.child_index, node_info.depth, len(node.indices), node_info.origin)) else: raise NotImplementedError('Node type not recognized!') # early stopping: if True, traversal of children of the current node will be skipped return early_stop octree = o3d.geometry.Octree(max_depth=4) octree.convert_from_point_cloud(pcd, size_expand=0.01) octree.traverse(f_traverse) ``` ## Find leaf node containing point Using the above traversal mechanism, an octree can be quickly searched for the leaf node that contains a given point. This functionality is provided via the `locate_leaf_node` method. ``` octree.locate_leaf_node(pcd.points[0]) ```
github_jupyter
# Quick Start This notebook demonstrates how to use MARO's reinforcement learning (RL) toolkit to solve the container inventory management ([CIM](https://maro.readthedocs.io/en/latest/scenarios/container_inventory_management.html)) problem. It is formalized as a multi-agent reinforcement learning problem, where each port acts as a decision agent. When a vessel arrives at a port, these agents must take actions by transferring a certain amount of containers to / from the vessel. The objective is for the agents to learn policies that minimize the cumulative container shortage. ``` import numpy as np # Common info common_config = { "port_attributes": ["empty", "full", "on_shipper", "on_consignee", "booking", "shortage", "fulfillment"], "vessel_attributes": ["empty", "full", "remaining_space"], "action_space": list(np.linspace(-1.0, 1.0, 21)), # Parameters for computing states "look_back": 7, "max_ports_downstream": 2, # Parameters for computing actions "finite_vessel_space": True, "has_early_discharge": True, # Parameters for computing rewards "reward_time_window": 99, "fulfillment_factor": 1.0, "shortage_factor": 1.0, "time_decay": 0.97 } ``` ## Shaping ``` from collections import defaultdict import numpy as np from maro.rl import Trajectory from maro.simulator.scenarios.cim.common import Action, ActionType class CIMTrajectory(Trajectory): def __init__( self, env, *, port_attributes, vessel_attributes, action_space, look_back, max_ports_downstream, reward_time_window, fulfillment_factor, shortage_factor, time_decay, finite_vessel_space=True, has_early_discharge=True ): super().__init__(env) self.port_attributes = port_attributes self.vessel_attributes = vessel_attributes self.action_space = action_space self.look_back = look_back self.max_ports_downstream = max_ports_downstream self.reward_time_window = reward_time_window self.fulfillment_factor = fulfillment_factor self.shortage_factor = shortage_factor self.time_decay = time_decay self.finite_vessel_space = 
finite_vessel_space self.has_early_discharge = has_early_discharge def get_state(self, event): vessel_snapshots, port_snapshots = self.env.snapshot_list["vessels"], self.env.snapshot_list["ports"] tick, port_idx, vessel_idx = event.tick, event.port_idx, event.vessel_idx ticks = [max(0, tick - rt) for rt in range(self.look_back - 1)] future_port_idx_list = vessel_snapshots[tick: vessel_idx: 'future_stop_list'].astype('int') port_features = port_snapshots[ticks: [port_idx] + list(future_port_idx_list): self.port_attributes] vessel_features = vessel_snapshots[tick: vessel_idx: self.vessel_attributes] return {port_idx: np.concatenate((port_features, vessel_features))} def get_action(self, action_by_agent, event): vessel_snapshots = self.env.snapshot_list["vessels"] action_info = list(action_by_agent.values())[0] model_action = action_info[0] if isinstance(action_info, tuple) else action_info scope, tick, port, vessel = event.action_scope, event.tick, event.port_idx, event.vessel_idx zero_action_idx = len(self.action_space) / 2 # index corresponding to value zero. 
vessel_space = vessel_snapshots[tick:vessel:self.vessel_attributes][2] if self.finite_vessel_space else float("inf") early_discharge = vessel_snapshots[tick:vessel:"early_discharge"][0] if self.has_early_discharge else 0 percent = abs(self.action_space[model_action]) if model_action < zero_action_idx: action_type = ActionType.LOAD actual_action = min(round(percent * scope.load), vessel_space) elif model_action > zero_action_idx: action_type = ActionType.DISCHARGE plan_action = percent * (scope.discharge + early_discharge) - early_discharge actual_action = round(plan_action) if plan_action > 0 else round(percent * scope.discharge) else: actual_action, action_type = 0, None return {port: Action(vessel, port, actual_action, action_type)} def get_offline_reward(self, event): port_snapshots = self.env.snapshot_list["ports"] start_tick = event.tick + 1 ticks = list(range(start_tick, start_tick + self.reward_time_window)) future_fulfillment = port_snapshots[ticks::"fulfillment"] future_shortage = port_snapshots[ticks::"shortage"] decay_list = [ self.time_decay ** i for i in range(self.reward_time_window) for _ in range(future_fulfillment.shape[0] // self.reward_time_window) ] tot_fulfillment = np.dot(future_fulfillment, decay_list) tot_shortage = np.dot(future_shortage, decay_list) return np.float32(self.fulfillment_factor * tot_fulfillment - self.shortage_factor * tot_shortage) def on_env_feedback(self, event, state_by_agent, action_by_agent, reward): self.trajectory["event"].append(event) self.trajectory["state"].append(state_by_agent) self.trajectory["action"].append(action_by_agent) def on_finish(self): training_data = {} for event, state, action in zip(self.trajectory["event"], self.trajectory["state"], self.trajectory["action"]): agent_id = list(state.keys())[0] data = training_data.setdefault(agent_id, {"args": [[] for _ in range(4)]}) data["args"][0].append(state[agent_id]) # state data["args"][1].append(action[agent_id][0]) # action 
data["args"][2].append(action[agent_id][1]) # log_p data["args"][3].append(self.get_offline_reward(event)) # reward for agent_id in training_data: training_data[agent_id]["args"] = [ np.asarray(vals, dtype=np.float32 if i == 3 else None) for i, vals in enumerate(training_data[agent_id]["args"]) ] return training_data ``` ## [Agent](https://maro.readthedocs.io/en/latest/key_components/rl_toolkit.html#agent) The out-of-the-box ActorCritic is used as our agent. ``` import torch.nn as nn from torch.optim import Adam, RMSprop from maro.rl import ActorCritic, ActorCriticConfig, FullyConnectedBlock, OptimOption, SimpleMultiHeadModel # We consider the port in question as well as two downstream ports. # We consider the states of these ports over the past 7 days plus the current day, hence the factor 8. input_dim = ( (common_config["look_back"] + 1) * (common_config["max_ports_downstream"] + 1) * len(common_config["port_attributes"]) + len(common_config["vessel_attributes"]) ) agent_config = { "model": { "actor": { "input_dim": input_dim, "output_dim": len(common_config["action_space"]), "hidden_dims": [256, 128, 64], "activation": nn.Tanh, "softmax": True, "batch_norm": False, "head": True }, "critic": { "input_dim": input_dim, "output_dim": 1, "hidden_dims": [256, 128, 64], "activation": nn.LeakyReLU, "softmax": False, "batch_norm": True, "head": True } }, "optimization": { "actor": OptimOption(optim_cls=Adam, optim_params={"lr": 0.001}), "critic": OptimOption(optim_cls=RMSprop, optim_params={"lr": 0.001}) }, "hyper_params": { "reward_discount": .0, "critic_loss_func": nn.SmoothL1Loss(), "train_iters": 10, "actor_loss_coefficient": 0.1, # loss = actor_loss_coefficient * actor_loss + critic_loss "k": 1, # for k-step return "lam": 0.0 # lambda return coefficient } } def get_ac_agent(): actor_net = FullyConnectedBlock(**agent_config["model"]["actor"]) critic_net = FullyConnectedBlock(**agent_config["model"]["critic"]) ac_model = SimpleMultiHeadModel( {"actor": actor_net, 
"critic": critic_net}, optim_option=agent_config["optimization"], ) return ActorCritic(ac_model, ActorCriticConfig(**agent_config["hyper_params"])) ``` ## Training This code cell demonstrates a typical single-threaded training workflow. ``` from maro.simulator import Env from maro.rl import Actor, MultiAgentWrapper, OnPolicyLearner from maro.utils import set_seeds set_seeds(1024) # for reproducibility env = Env("cim", "toy.4p_ssdd_l0.0", durations=1120) agent = MultiAgentWrapper({name: get_ac_agent() for name in env.agent_idx_list}) actor = Actor(env, agent, CIMTrajectory, trajectory_kwargs=common_config) learner = OnPolicyLearner(actor, 40) # 40 episodes learner.run() ```
github_jupyter
# Using PyTorch for simple regression [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/parrt/fundamentals-of-deep-learning/blob/main/notebooks/2.pytorch-nn-training-cars.ipynb) By [Terence Parr](https://explained.ai). Once we can implement our own gradient descent using pytorch autograd and matrix algebra, it's time to graduate to using pytorch's built-in neural network module and the built-in optimizers (e.g., Adam). Next, we observe how a sequence of two linear models is effectively the same as a single linear model. After we add a nonlinearity, we see more sophisticated curve fitting. Then we see how a sequence of multiple linear units plus nonlinearities affects predictions. Finally, we see what happens if we give a model too much power: the regression curve over fits the training data. ## Support code ``` import os import sys import torch import torch.nn as nn import numpy as np import pandas as pd from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt %config InlineBackend.figure_format = 'retina' import tsensor def carshow(m, b, X, y, xcol='WGT', file=None): fig, ax = plt.subplots(figsize=(4,3)) ax.scatter(X, y, s=15, color='#F46C43', alpha=.5) ax.plot(X, m * X + b, lw=.5, color='black') ax.set_title(f"$y = {m:.3f}x + {b:.2f}$") if sum(X)/len(X)<1.0: ax.set_xlabel(f"{xcol} (Standardized)") else: ax.set_xlabel(xcol) ax.set_ylabel("MPG") if file: plt.savefig(f"/Users/{os.environ['USER']}/Desktop/{file}.pdf") plt.show() def mshow(model, X, y, xcol='WGT', file=None): "Plot X[xcol] vs MPG (y) and show model predictions for each x" fig, ax = plt.subplots(figsize=(4,3)) with torch.no_grad(): y_pred = model(X) ax.scatter(X, y, s=15, color='#F46C43', alpha=.5) ax.scatter(X, y_pred, lw=.5, color='black', s=1) if sum(X)/len(X)<1.0: ax.set_xlabel(f"{xcol} (Standardized)") else: ax.set_xlabel(xcol) ax.set_ylabel("MPG") plt.tight_layout() if file: 
plt.savefig(f"/Users/{os.environ['USER']}/Desktop/{file}.pdf") plt.show() ``` ## Load simple data set ``` df_cars = pd.read_csv("data/cars.csv") df_cars.head(2) # normalize the data again; regression will be WGT -> MPG n = len(df_cars) X = torch.tensor(df_cars.WGT).float().reshape(n,1) X = (X-torch.mean(X))/torch.std(X) y = torch.tensor(df_cars.MPG).reshape(n,1) ``` ## Pytorch built-in linear model We've been doing our own matrix arithmetic to implement a linear model, which is really just a linear layer with one neuron in a deep learning network. Now, let's use PyTorch's built-in linear layer component. We can also avoid updating the model parameters manually, which can get very complicated, using a built-in optimizer. Let's copy the PyTorch training loop from the previous notebook and make the necessary changes. Side-by-side, here are the changes: <img src="images/LA_to_nn.png" width="800"> The `nn.Linear` object represents a single linear layer and we must specify the number of features for each instance coming in and the number of neurons in the layer. A typical linear regression model is ```python model = nn.Linear(in_features=1, out_features=1) ``` Graphically, that looks like: <img src="images/nn.1-layer-regr.png" width="200"> One of the key elements here is telling the Optimizer what the model parameters are. 
The `parameters()` method of the standard neural network layers returns a list of tensors, which the optimizer adjusts during training: ``` model = nn.Linear(1, 1) # each instance has 1 value, spit out 1 value list(model.parameters()) ``` Here is the full training loop and visualization calls: ``` model = nn.Linear(1, 1) # each instance has 1 value, spit out 1 value learning_rate = 1 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) nepochs=70 for epoch in range(nepochs+1): with tsensor.clarify(): # this visualizes any errors we get but you can remove this if you want y_pred = model(X) loss = torch.mean((y_pred - y)**2) if epoch % 10 == 0: with torch.no_grad(): mae = torch.mean(torch.abs(y_pred - y)) print(f"Epoch {epoch:3d} MSE loss {loss:12.3f} MAE {mae:8.2f}") # Backprop to compute gradients optimizer.zero_grad() loss.backward() optimizer.step() # adjust weights m = model.weight.detach().item() b = model.bias.detach().item() print(f"LAST MSE loss {loss:12.3f} MAE {mae:8.2f}") carshow(m, b, X=X, y=df_cars.MPG) mshow(model, X=X, y=df_cars.MPG) ``` ## Factor out a training method It is good programming practice to factor out code into a function that we use a lot, which is the case here for our training loop: ``` def train(model, learning_rate = .5, nepochs=2000): optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) for epoch in range(nepochs+1): y_pred = model(X) loss = torch.mean((y_pred - y)**2) if epoch % (nepochs//10) == 0: with torch.no_grad(): mae = torch.mean(torch.abs(y_pred - y)) print(f"Epoch {epoch:4d} MSE loss {loss:12.3f} MAE {mae:8.2f}") optimizer.zero_grad() loss.backward() # autograd computes w1.grad, b1.grad, ... optimizer.step() print(f"LAST MSE loss {loss:12.3f} MAE {mae:8.2f}") return loss ``` With all of those details hidden in a `train()` function, we get a very clear picture of the architecture of the model and the training. 
It's a simple matter of creating the appropriate model, training it, and then displaying the predictions: ``` model = nn.Linear(1, 1) train(model, learning_rate=1, nepochs=70) mshow(model, X=X, y=df_cars.MPG) ``` ## Sequence of 2 linear models with pytorch ### Without nonlinearity If we send the output of a linear model into another linear model, we get just another linear model. We have to add a nonlinearity such as a rectified linear unit (ReLU) or a sigmoid. To demonstrate that, let's build a two layer neural network where each layer has a single neuron. Graphically, that looks like: <img src="images/nn.2-layer-regr-no-nonlin.png" width="300"> ``` model = nn.Sequential( nn.Linear(1, 1), # each instance has 1 feature, spit out 1 values nn.Linear(1, 1) ) train(model, learning_rate=2.1, nepochs=100) mshow(model, X=X, y=df_cars.MPG, file="2linear") ``` As you can see, the predictions of the model still look like a best fit line through the training data. ### With ReLU nonlinearity If we stick a ReLU between the output of the first layer and the input of the second layer, all of a sudden we get a piecewise linear regression: <img src="images/nn.2-layer-regr-relu.png" width="350"> ``` model = nn.Sequential( nn.Linear(1, 1), # each instance has 1 feature, spit out 1 values nn.ReLU(), # add nonlinearity, gives two lines nn.Linear(1, 1) ) train(model, learning_rate=1, nepochs=100) mshow(model, X=X, y=df_cars.MPG, file="linRlin") ``` #### Exercise Run that cell many times and observe where the kink in the line is; it should bounce around the WGT=1 position. Why do we get slightly different models? <details> <summary>Solution</summary> We are initializing the model parameters randomly and so training starts at different locations. Our final model position in parameter space depends on our starting location because we are running for a finite number of iterations. 
In principle, if we ran it for a very long time we would always end up in the same spot because our model is so simple. </details> ## Layers with multiple neurons Each linear model is fairly weak because it can only draw a single line. Adding multiple layers allows us to draw piecewise linear curves as we just saw. We can also stack multiple neurons at each layer. Because we are initializing the weights of every neuron (linear model) randomly, each neuron in the layer can converge on a different model during training. Graphically, it looks like this if we stack five neurons in the first layer but keep everything else the same: <img src="images/nn.2-layer-5-neuron-regr-relu.png" width="350"> It's a good idea to visualize the dimensions of the matrix and vector operations. TensorSensor can help you out here: ``` # Draw matrix shapes import torch.nn.functional as F import tsensor # https://github.com/parrt/tensor-sensor W1 = torch.rand(5,1) W2 = torch.rand(1,5) x = torch.rand(1,1) with tsensor.explain(): a1 = F.relu(W1 @ x) a2 = W2 @ a1 ``` Using the prebuilt linear layer, we increase the number of output features for the first layer to 5 and then the second layer has five input features. ``` model = nn.Sequential( nn.Linear(1, 5), nn.ReLU(), nn.Linear(5, 1) ) train(model, learning_rate=1, nepochs=100) mshow(model, X=X, y=df_cars.MPG, file="lin5Rlin") ``` #### Exercise Run that cell many times and observe where the kinks are in the line and how many there are. How would you compare the performance of this model, at its best, to the previous model? <details> <summary>Solution</summary> This model often gets a similar two-line solution just like the previous solution, but it often gets three or four line segments that seem to fit the data better. </details> #### Exercise Create a three-layer regression network that looks like the following. <img src="images/nn.3-layer-regr-relu.png" width="450"> Hint: just add another nonlinearity and a linear layer. 
The trick is to get the number of input and output features right. Does your solution get a smoother piecewise-linear solution? Is it worth the extra complexity and training time? <details> <summary>Solution</summary> <pre> model = nn.Sequential( nn.Linear(1, 5), nn.ReLU(), nn.Linear(5, 5), nn.ReLU(), nn.Linear(5, 1) ) train(model, learning_rate=.01, nepochs=500) mshow(model, X=X, y=df_cars.MPG, file="lin5Rlin2") </pre> </details> Here is what the matrix algebra inside the model looks like: ``` # Draw matrix shapes import torch.nn.functional as F import tsensor # https://github.com/parrt/tensor-sensor W1 = torch.rand(5,1) W2 = torch.rand(5,5) W3 = torch.rand(1,5) x = torch.rand(1,1) with tsensor.explain() as e: a1 = F.relu(W1 @ x) a2 = F.relu(W2 @ a1) a3 = W3 @ a2 ``` ## Using the logistic function (sigmoid) nonlinearity instead of ReLU Another nonlinearity we can use to glue layers together is the logistic function. It's slower to train but gives us curved lines through the data. Let's replace the rectified linear unit and see what the model predictions look like. ``` model = nn.Sequential( nn.Linear(1, 1), # each instance has 1 value, spit out 1 values nn.Sigmoid(), # doesn't converge as fast; bump learning rate nn.Linear(1, 1) ) train(model, learning_rate=0.1, nepochs=1000) mshow(model, X=X, y=df_cars.MPG, file='linSlin') ``` That's a really nice curve that fits the data very well. #### Exercise Change the learning rate from 0.1 to 2 and retrain the model multiple times. What do you observe? <details> <summary>Solution</summary> Sometimes we get a completely flat line at about y mean. Training is probably bouncing the model parameters back and forth across the valley walls and not able to descend into a minimum. </details> #### Exercise Change the learning rate to 0.01 and retrain the model multiple times. What do you observe? 
<details> <summary>Solution</summary> The learning rate is too small and we do not converge fast enough for the thousand iterations to reach a minimum. </details> ## Overfitting with a two-layer pytorch network ``` model = nn.Sequential( nn.Linear(1, 1000), # each instance has 1 value, spit out 1000 values nn.ReLU(), # fast to compute, not smooth y_pred nn.Linear(1000, 1) ) train(model, learning_rate=.1, nepochs=1000) mshow(model, X=X, y=df_cars.MPG, file="lin1kRlin") ``` #### Exercise Change the nonlinearity from a rectified linear unit to a sigmoid, keeping the 1000-neuron layer. Does this overfit like the ReLU? <details> <summary>Solution</summary> <pre> model = nn.Sequential( nn.Linear(1, 1000), # each instance has 1 value, spit out 1000 values nn.Sigmoid(), # add nonlinearity nn.Linear(1000, 1) ) train(model, learning_rate=.1, nepochs=1000) mshow(model, X=X, y=df_cars.MPG) </pre> <br><br> This model does not seem to overfit. This highlights the fact that different architectures with the exact same number of iterations and learning rate can give you very different models. Using the sigmoid is slower but seems to resist kinks in the curve that chase noise. </details>
github_jupyter
``` import sys import pandas as pd import botometer import os ``` # VARIABLE INITIATION ## arg 1 = 'rs' or 'sn' ## arg 2 = hour file 6,7 or 8 ? ## arg 3 = start row ## arg 4 = end row ## arg 5 = key selection, 1,2,3,4 ## sn 7 : total row 33277 ## sn 8 : total row 53310 ## rs 7 : 7230 ## rs 8 : 10493 ``` sys.argv = ['-', 'sn', '7', '0', '33277', '1', '1'] print (sys.argv[5]) mashape_key = "QRraJnMT9KmshkpJ7iu74xKFN1jtp1IyBBijsnS5NGbEuwIX54" if(int(sys.argv[5])==1): twitter_app_auth = { 'consumer_key': 'MwvJSQvmaA0ZPHxYEMRVQanTt', 'consumer_secret': 'DupqFwHhOEq8vxwtltdIUGzVEY5Eh7mXmyLEhrlmCgiPgRchFe', 'access_token': '218041595-r8KHp8e1XuIdwByEvIJ9Yo1oundwXtSSeJpFLGOC', 'access_token_secret': 'qTGgkAQ3EWCXWUnYurBQ2nziKOQ9nZfA9bta2ILueY5xP', } elif (int(sys.argv[5])==2): twitter_app_auth = { 'consumer_key': 'xQkTg8KSU7HlEEvaD8EJA', 'consumer_secret': 'TMFRBmvGdGJtzwFJ3fyluPWszl5qCDuwBUqy0AGj0g', 'access_token': '218041595-JUmLw0xEtnJVrqn03DCirlZpnL1Z7taWwKYZYUPN', 'access_token_secret': 'cIdkjvTghunH6GGLRIjQW06ghyOFkX1w7jnurcJPVyIQw', } elif (int(sys.argv[5])==3): twitter_app_auth = { 'consumer_key': 'sPzHpcj4jMital75nY7dfd4zn', 'consumer_secret': 'rTGm68zdNmLvnTc22cBoFg4eVMf3jLVDSQLOwSqE9lXbVWLweI', 'access_token': '4258226113-4UnHbbbxoRPz10thy70q9MtEk9xXfJGOpAY12KW', 'access_token_secret': '549HdasMEW0q2uV05S5s4Uj5SdCeEWT8dNdLNPiAeeWoX', } elif (int(sys.argv[5])==4): twitter_app_auth = { 'consumer_key': 'wZnIRW0aMRmHuQ3Rh5c2v7al4', 'consumer_secret': 'ugFcKDc0WP7ktDw3Ch1ZddWknckkfFiH9ZvIKFDwg7k8ivDyFB', 'access_token': '218041595-JSRBUY3CJ55km9Jb0QnJA6lQnyRoPfvpq6lNAsak', 'access_token_secret': 'ck1wTLfMP5CeLAfnbkS3U7oKxY6e0xu9C7fosq3fNH8gO', } else: twitter_app_auth = { 'consumer_key': 'kcnlkVFRADdxaWNtWNAy3LquT', 'consumer_secret': 'bAH258rRds9uWAi38kSwxgbJ1x0rAspasQACgOruuK4qnKsXld', 'access_token': '218041595-yrk9WyMnTjh4PBidhApb0DwryK83Wzr32IWi6bP4', 'access_token_secret': 'GCmOzFmzrOoAv59lCpKRQrC9e7H1P0449iaBW1rI66saS', } bom = 
botometer.Botometer(wait_on_ratelimit=True, mashape_key=mashape_key, **twitter_app_auth) if sys.argv[1] == 'rs': input_file = "data/distinct_userlist_rs_201606230"+sys.argv[2]+".csv" else: input_file = "data/distinct_userlist_201606230"+sys.argv[2]+".csv" bot_data = pd.read_csv(input_file, index_col = 0, names =['screen_name']) # print(len(bot_data)) distinct_uname = [] for i in bot_data.values: distinct_uname.append((str('@'+i).replace("['", "")).replace("']", '')) print (distinct_uname[int(sys.argv[3]):int(sys.argv[4])]) username_df = pd.DataFrame(distinct_uname) # print (username_df) username_df.to_csv('username_for_bot',sep=',', encoding='utf-8') username_df.head() def bot_reader_writer(): username_df = pd.read_csv('username_for_bot', index_col=0) # username_df.head()['0'].tolist() botoresult = pd.DataFrame() for screen_name, result in bom.check_accounts_in(username_df.head()['0'].tolist()): print(result) print(screen_name) botoresult = botoresult.append(result, ignore_index=True) output_bot = pd.concat([botoresult.user.apply(pd.Series), botoresult.scores.apply(pd.Series), botoresult.categories.apply(pd.Series)], axis=1) print("bot result :", len(botoresult)) print("bot output :", len(output_bot)) output_file = "data/outputbot_201606230"+sys.argv[2]+"_"+sys.argv[1]+"_"+sys.argv[6]+".csv" # if file does not exist write header if not os.path.isfile(output_file): output_bot.to_csv(output_file,sep=',', encoding='utf-8', index=False) else: # else it exists so append without writing the header output_bot.to_csv(output_file, mode = 'a', sep=',', encoding='utf-8', header=False, index=False) username_df = username_df.drop(username_df.head().index) username_df.to_csv('username_for_bot',sep=',', encoding='utf-8') username_df = pd.read_csv('username_for_bot', index_col=0) while len(username_df.head()) > 0: bot_reader_writer() # print(username_df.head()) # username_df = username_df.drop(username_df.head().index) if len(username_df.head()) <= 0: print ("Finish") ```
github_jupyter
## SimCLR: A Simple Framework for Contrastive Learning of Visual Representations This colab demonstrates how to load pretrained/finetuned SimCLR models from checkpoints or hub modules. It contains two parts: * Part I - Load checkpoints and print parameters (count) * Part II - Load hub module for inference The checkpoints are accessible in the following Google Cloud Storage folders. * Pretrained SimCLRv2 models with a linear classifier: [gs://simclr-checkpoints/simclrv2/pretrained](https://console.cloud.google.com/storage/browser/simclr-checkpoints/simclrv2/pretrained) * Fine-tuned SimCLRv2 models on 1% of labels: [gs://simclr-checkpoints/simclrv2/finetuned_1pct](https://console.cloud.google.com/storage/browser/simclr-checkpoints/simclrv2/finetuned_1pct) * Fine-tuned SimCLRv2 models on 10% of labels: [gs://simclr-checkpoints/simclrv2/finetuned_10pct](https://console.cloud.google.com/storage/browser/simclr-checkpoints/simclrv2/finetuned_10pct) * Fine-tuned SimCLRv2 models on 100% of labels: [gs://simclr-checkpoints/simclrv2/finetuned_100pct](https://console.cloud.google.com/storage/browser/simclr-checkpoints/simclrv2/finetuned_100pct) * Supervised models with the same architectures: [gs://simclr-checkpoints/simclrv2/pretrained](https://console.cloud.google.com/storage/browser/simclr-checkpoints/simclrv2/pretrained) Use the corresponding checkpoint / hub-module paths for accessing the model. For example, to use a pre-trained model (with a linear classifier) with ResNet-152 (2x+SK), set the path to `gs://simclr-checkpoints/simclrv2/pretrained/r152_2x_sk1`. 
## Part I - Load checkpoints and print parameters (count) ``` import re import numpy as np import tensorflow.compat.v1 as tf tf.disable_eager_execution() import tensorflow_hub as hub import tensorflow_datasets as tfds import matplotlib import matplotlib.pyplot as plt def count_params(checkpoint, excluding_vars=[], verbose=True): vdict = checkpoint.get_variable_to_shape_map() cnt = 0 for name, shape in vdict.items(): skip = False for evar in excluding_vars: if re.search(evar, name): skip = True if skip: continue if verbose: print(name, shape) cnt += np.prod(shape) cnt = cnt / 1e6 print("Total number of parameters: {:.2f}M".format(cnt)) return cnt checkpoint_path = 'gs://simclr-checkpoints/simclrv2/finetuned_100pct/r50_1x_sk0/' checkpoint = tf.train.load_checkpoint(checkpoint_path) _ = count_params(checkpoint, excluding_vars=['global_step', "Momentum", 'ema', 'memory', 'head'], verbose=False) ``` ## Part II - Load hub module for inference ``` #@title Load class id to label text mapping from big_transfer (hidden) # Code snippet credit: https://github.com/google-research/big_transfer !wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt imagenet_int_to_str = {} with open('ilsvrc2012_wordnet_lemmas.txt', 'r') as f: for i in range(1000): row = f.readline() row = row.rstrip() imagenet_int_to_str.update({i: row}) tf_flowers_labels = ['dandelion', 'daisy', 'tulips', 'sunflowers', 'roses'] #@title Preprocessing functions from data_util.py in SimCLR repository (hidden). FLAGS_color_jitter_strength = 0.3 CROP_PROPORTION = 0.875 # Standard for ImageNet. 
def random_apply(func, p, x): """Randomly apply function func to x with probability p.""" return tf.cond( tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32), tf.cast(p, tf.float32)), lambda: func(x), lambda: x) def random_brightness(image, max_delta, impl='simclrv2'): """A multiplicative vs additive change of brightness.""" if impl == 'simclrv2': factor = tf.random_uniform( [], tf.maximum(1.0 - max_delta, 0), 1.0 + max_delta) image = image * factor elif impl == 'simclrv1': image = random_brightness(image, max_delta=max_delta) else: raise ValueError('Unknown impl {} for random brightness.'.format(impl)) return image def to_grayscale(image, keep_channels=True): image = tf.image.rgb_to_grayscale(image) if keep_channels: image = tf.tile(image, [1, 1, 3]) return image def color_jitter(image, strength, random_order=True): """Distorts the color of the image. Args: image: The input image tensor. strength: the floating number for the strength of the color augmentation. random_order: A bool, specifying whether to randomize the jittering order. Returns: The distorted image tensor. """ brightness = 0.8 * strength contrast = 0.8 * strength saturation = 0.8 * strength hue = 0.2 * strength if random_order: return color_jitter_rand(image, brightness, contrast, saturation, hue) else: return color_jitter_nonrand(image, brightness, contrast, saturation, hue) def color_jitter_nonrand(image, brightness=0, contrast=0, saturation=0, hue=0): """Distorts the color of the image (jittering order is fixed). Args: image: The input image tensor. brightness: A float, specifying the brightness for color jitter. contrast: A float, specifying the contrast for color jitter. saturation: A float, specifying the saturation for color jitter. hue: A float, specifying the hue for color jitter. Returns: The distorted image tensor. 
""" with tf.name_scope('distort_color'): def apply_transform(i, x, brightness, contrast, saturation, hue): """Apply the i-th transformation.""" if brightness != 0 and i == 0: x = random_brightness(x, max_delta=brightness) elif contrast != 0 and i == 1: x = tf.image.random_contrast( x, lower=1-contrast, upper=1+contrast) elif saturation != 0 and i == 2: x = tf.image.random_saturation( x, lower=1-saturation, upper=1+saturation) elif hue != 0: x = tf.image.random_hue(x, max_delta=hue) return x for i in range(4): image = apply_transform(i, image, brightness, contrast, saturation, hue) image = tf.clip_by_value(image, 0., 1.) return image def color_jitter_rand(image, brightness=0, contrast=0, saturation=0, hue=0): """Distorts the color of the image (jittering order is random). Args: image: The input image tensor. brightness: A float, specifying the brightness for color jitter. contrast: A float, specifying the contrast for color jitter. saturation: A float, specifying the saturation for color jitter. hue: A float, specifying the hue for color jitter. Returns: The distorted image tensor. """ with tf.name_scope('distort_color'): def apply_transform(i, x): """Apply the i-th transformation.""" def brightness_foo(): if brightness == 0: return x else: return random_brightness(x, max_delta=brightness) def contrast_foo(): if contrast == 0: return x else: return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast) def saturation_foo(): if saturation == 0: return x else: return tf.image.random_saturation( x, lower=1-saturation, upper=1+saturation) def hue_foo(): if hue == 0: return x else: return tf.image.random_hue(x, max_delta=hue) x = tf.cond(tf.less(i, 2), lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo), lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo)) return x perm = tf.random_shuffle(tf.range(4)) for i in range(4): image = apply_transform(perm[i], image) image = tf.clip_by_value(image, 0., 1.) 
return image def _compute_crop_shape( image_height, image_width, aspect_ratio, crop_proportion): """Compute aspect ratio-preserving shape for central crop. The resulting shape retains `crop_proportion` along one side and a proportion less than or equal to `crop_proportion` along the other side. Args: image_height: Height of image to be cropped. image_width: Width of image to be cropped. aspect_ratio: Desired aspect ratio (width / height) of output. crop_proportion: Proportion of image to retain along the less-cropped side. Returns: crop_height: Height of image after cropping. crop_width: Width of image after cropping. """ image_width_float = tf.cast(image_width, tf.float32) image_height_float = tf.cast(image_height, tf.float32) def _requested_aspect_ratio_wider_than_image(): crop_height = tf.cast(tf.rint( crop_proportion / aspect_ratio * image_width_float), tf.int32) crop_width = tf.cast(tf.rint( crop_proportion * image_width_float), tf.int32) return crop_height, crop_width def _image_wider_than_requested_aspect_ratio(): crop_height = tf.cast( tf.rint(crop_proportion * image_height_float), tf.int32) crop_width = tf.cast(tf.rint( crop_proportion * aspect_ratio * image_height_float), tf.int32) return crop_height, crop_width return tf.cond( aspect_ratio > image_width_float / image_height_float, _requested_aspect_ratio_wider_than_image, _image_wider_than_requested_aspect_ratio) def center_crop(image, height, width, crop_proportion): """Crops to center of image and rescales to desired size. Args: image: Image Tensor to crop. height: Height of image to be cropped. width: Width of image to be cropped. crop_proportion: Proportion of image to retain along the less-cropped side. Returns: A `height` x `width` x channels Tensor holding a central crop of `image`. 
""" shape = tf.shape(image) image_height = shape[0] image_width = shape[1] crop_height, crop_width = _compute_crop_shape( image_height, image_width, height / width, crop_proportion) offset_height = ((image_height - crop_height) + 1) // 2 offset_width = ((image_width - crop_width) + 1) // 2 image = tf.image.crop_to_bounding_box( image, offset_height, offset_width, crop_height, crop_width) image = tf.image.resize_bicubic([image], [height, width])[0] return image def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image data. bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`). 
""" with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]): shape = tf.shape(image) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) image = tf.image.crop_to_bounding_box( image, offset_y, offset_x, target_height, target_width) return image def crop_and_resize(image, height, width): """Make a random crop and resize it to height `height` and width `width`. Args: image: Tensor representing the image. height: Desired image height. width: Desired image width. Returns: A `height` x `width` x channels Tensor holding a random crop of `image`. """ bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) aspect_ratio = width / height image = distorted_bounding_box_crop( image, bbox, min_object_covered=0.1, aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio), area_range=(0.08, 1.0), max_attempts=100, scope=None) return tf.image.resize_bicubic([image], [height, width])[0] def gaussian_blur(image, kernel_size, sigma, padding='SAME'): """Blurs the given image with separable convolution. Args: image: Tensor of shape [height, width, channels] and dtype float to blur. kernel_size: Integer Tensor for the size of the blur kernel. This is should be an odd number. If it is an even number, the actual kernel size will be size + 1. sigma: Sigma value for gaussian operator. padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'. Returns: A Tensor representing the blurred image. 
""" radius = tf.to_int32(kernel_size / 2) kernel_size = radius * 2 + 1 x = tf.to_float(tf.range(-radius, radius + 1)) blur_filter = tf.exp( -tf.pow(x, 2.0) / (2.0 * tf.pow(tf.to_float(sigma), 2.0))) blur_filter /= tf.reduce_sum(blur_filter) # One vertical and one horizontal filter. blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1]) blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1]) num_channels = tf.shape(image)[-1] blur_h = tf.tile(blur_h, [1, 1, num_channels, 1]) blur_v = tf.tile(blur_v, [1, 1, num_channels, 1]) expand_batch_dim = image.shape.ndims == 3 if expand_batch_dim: # Tensorflow requires batched input to convolutions, which we can fake with # an extra dimension. image = tf.expand_dims(image, axis=0) blurred = tf.nn.depthwise_conv2d( image, blur_h, strides=[1, 1, 1, 1], padding=padding) blurred = tf.nn.depthwise_conv2d( blurred, blur_v, strides=[1, 1, 1, 1], padding=padding) if expand_batch_dim: blurred = tf.squeeze(blurred, axis=0) return blurred def random_crop_with_resize(image, height, width, p=1.0): """Randomly crop and resize an image. Args: image: `Tensor` representing an image of arbitrary size. height: Height of output image. width: Width of output image. p: Probability of applying this transformation. Returns: A preprocessed image `Tensor`. """ def _transform(image): # pylint: disable=missing-docstring image = crop_and_resize(image, height, width) return image return random_apply(_transform, p=p, x=image) def random_color_jitter(image, p=1.0): def _transform(image): color_jitter_t = functools.partial( color_jitter, strength=FLAGS_color_jitter_strength) image = random_apply(color_jitter_t, p=0.8, x=image) return random_apply(to_grayscale, p=0.2, x=image) return random_apply(_transform, p=p, x=image) def random_blur(image, height, width, p=1.0): """Randomly blur an image. Args: image: `Tensor` representing an image of arbitrary size. height: Height of output image. width: Width of output image. 
p: probability of applying this transformation. Returns: A preprocessed image `Tensor`. """ del width def _transform(image): sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32) return gaussian_blur( image, kernel_size=height//10, sigma=sigma, padding='SAME') return random_apply(_transform, p=p, x=image) def batch_random_blur(images_list, height, width, blur_probability=0.5): """Apply efficient batch data transformations. Args: images_list: a list of image tensors. height: the height of image. width: the width of image. blur_probability: the probaility to apply the blur operator. Returns: Preprocessed feature list. """ def generate_selector(p, bsz): shape = [bsz, 1, 1, 1] selector = tf.cast( tf.less(tf.random_uniform(shape, 0, 1, dtype=tf.float32), p), tf.float32) return selector new_images_list = [] for images in images_list: images_new = random_blur(images, height, width, p=1.) selector = generate_selector(blur_probability, tf.shape(images)[0]) images = images_new * selector + images * (1 - selector) images = tf.clip_by_value(images, 0., 1.) new_images_list.append(images) return new_images_list def preprocess_for_train(image, height, width, color_distort=True, crop=True, flip=True): """Preprocesses the given image for training. Args: image: `Tensor` representing an image of arbitrary size. height: Height of output image. width: Width of output image. color_distort: Whether to apply the color distortion. crop: Whether to crop the image. flip: Whether or not to flip left and right of an image. Returns: A preprocessed image `Tensor`. """ if crop: image = random_crop_with_resize(image, height, width) if flip: image = tf.image.random_flip_left_right(image) if color_distort: image = random_color_jitter(image) image = tf.reshape(image, [height, width, 3]) image = tf.clip_by_value(image, 0., 1.) return image def preprocess_for_eval(image, height, width, crop=True): """Preprocesses the given image for evaluation. 
Args: image: `Tensor` representing an image of arbitrary size. height: Height of output image. width: Width of output image. crop: Whether or not to (center) crop the test images. Returns: A preprocessed image `Tensor`. """ if crop: image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION) image = tf.reshape(image, [height, width, 3]) image = tf.clip_by_value(image, 0., 1.) return image def preprocess_image(image, height, width, is_training=False, color_distort=True, test_crop=True): """Preprocesses the given image. Args: image: `Tensor` representing an image of arbitrary size. height: Height of output image. width: Width of output image. is_training: `bool` for whether the preprocessing is for training. color_distort: whether to apply the color distortion. test_crop: whether or not to extract a central crop of the images (as for standard ImageNet evaluation) during the evaluation. Returns: A preprocessed image `Tensor` of range [0, 1]. """ image = tf.image.convert_image_dtype(image, dtype=tf.float32) if is_training: return preprocess_for_train(image, height, width, color_distort) else: return preprocess_for_eval(image, height, width, test_crop) #@title Load tensorflow datasets: we use tensorflow flower dataset as an example batch_size = 5 dataset_name = 'tf_flowers' tfds_dataset, tfds_info = tfds.load( dataset_name, split='train', with_info=True) num_images = tfds_info.splits['train'].num_examples num_classes = tfds_info.features['label'].num_classes def _preprocess(x): x['image'] = preprocess_image( x['image'], 224, 224, is_training=False, color_distort=False) return x x = tfds_dataset.map(_preprocess).batch(batch_size) x = tf.data.make_one_shot_iterator(x).get_next() tfds_dataset, x #@title Load module and get the computation graph hub_path = 'gs://simclr-checkpoints/simclrv2/finetuned_100pct/r50_1x_sk0/hub/' module = hub.Module(hub_path, trainable=False) key = module(inputs=x['image'], signature="default", as_dict=True) logits_t = 
key['logits_sup'][:, :] key # The accessible tensor in the return dictionary sess = tf.Session() sess.run(tf.global_variables_initializer()) image, labels, logits = sess.run((x['image'], x['label'], logits_t)) pred = logits.argmax(-1) #@title Plot the images and predictions fig, axes = plt.subplots(5, 1, figsize=(15, 15)) for i in range(5): axes[i].imshow(image[i]) true_text = tf_flowers_labels[labels[i]] pred_text = imagenet_int_to_str[pred[i]] if i == 0: axes[i].text(0, 0, 'Attention: the predictions here are inaccurate as they are constrained among 1000 ImageNet classes.\n', c='r') axes[i].axis('off') axes[i].text(256, 128, 'Truth: ' + true_text + '\n' + 'Pred: ' + pred_text) # aa = hub.Module(hub_path) a = module(image) logitss = tf.layers.dense(a, 1000) prob = tf.nn.softmax(logitss) a.shape from keras import layers key['block_group4'].shape print(np.min(sess.run(afaf)[2][:1000])) print(np.min(sess.run(logits_t))) print(len(module.variable_map)) model = tf.keras.Sequential([ module(inputs=x['image'], signature="default"), layers.Dense(1000, activation='softmax') ]) afaf = module(image) print("names:", module.get_signature_names()) print("input:", module.get_input_info_dict()) print("output:", module.get_output_info_dict()) module.get_output_info_dict(), afaf.shape afaf = module(image) sess.run(afaf).shape print(module.get_signature_names) # https://e3oroush.github.io/tsne-visualization/ import numpy as np import tensorflow as tf from PIL import Image from sklearn.manifold import TSNE import os, re, glob2, pickle from keras.engine import Model from keras.layers import Input # from keras_vggface.vggface import VGGFace from keras.preprocessing import image # from keras_vggface import utils import matplotlib.pyplot as plt %pylab inline # custom paramers: change these parameters to properly run on your machine image_path = '/home/esoroush/Datasets/MSRC/MSRC/' # addres of images no_of_images = 1600 # number of images. 
It is recommended to use a square of 2 number ellipside =False # elipsoid or rectangular visualization image_width = 64 # width and height of each visualized images # choices are: inception, raw and vggfaces feature_extraction = 'inception' # feature extraction method # find all images image_names = glob2.glob(image_path + "**/*.png") image_names +=glob2.glob(image_path + "**/*.jpg") image_names +=glob2.glob(image_path + "**/*.gif") # suffle images np.random.seed(3) np.random.shuffle(image_names) if no_of_images > len(image_names): no_of_images = len(image_names) image_names = image_names[:no_of_images] # Google inception pre-trained network if feature_extraction == 'inception': print('using %s network/method for feature extraction'%feature_extraction) import sys, tarfile from six.moves import urllib model_dir = os.path.join(os.environ['HOME'], '.tensorflow/models') DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' def create_graph(): """Creates a graph from saved GraphDef file and returns a saver.""" # Creates graph from saved graph_def.pb. with tf.gfile.FastGFile(os.path.join( model_dir, 'classify_image_graph_def.pb'), 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='') def run_inference_on_image(image): """Runs forward path on an image. Args: image: Image file name. Returns: off the shelf 2048 feature vector """ if not tf.gfile.Exists(image): tf.logging.fatal('File does not exist %s', image) image_data = tf.gfile.FastGFile(image, 'rb').read() with tf.Session() as sess: # Some useful tensors: # 'softmax:0': A tensor containing the normalized prediction across # 1000 labels. # 'pool_3:0': A tensor containing the next-to-last layer containing 2048 # float description of the image. # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG # encoding of the image. # Runs the softmax tensor by feeding the image_data as input to the graph. 
pool3 = sess.graph.get_tensor_by_name('pool_3:0') features = sess.run(pool3, {'DecodeJpeg/contents:0': image_data}) return features def maybe_download_and_extract(): """Download and extract model tar file.""" dest_directory = model_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % ( filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory) maybe_download_and_extract() # Creates graph from saved GraphDef. create_graph() feature_filename = '%s-feature-inception-%d.p'%(image_path.split('/')[-2], no_of_images) if os.path.exists(feature_filename): with open(feature_filename, 'rb') as f: features, image_names = pickle.load(f) else: features = np.zeros([no_of_images, 2048]) for i in xrange(no_of_images): print('image name: %s index: %d/%d' %(image_names[i], i, no_of_images)) features[i, :] = run_inference_on_image(image=image_names[i]).squeeze() with open(feature_filename, 'wb') as f: pickle.dump((features, image_names), f) # raw image pixels resized to 100x100 if feature_extraction == 'raw': print('using %s network/method for feature extraction'%feature_extraction) features = np.zeros([no_of_images, 100*100]) for i, name in enumerate(image_names): features[i, :] = np.asarray(Image.open(name).resize((100, 100)).convert('L')).reshape(-1,) # vgg face pretrained network if feature_extraction == 'vggfaces': print('using %s network/method for feature extraction'%feature_extraction) # Convolution Features features = np.zeros([no_of_images, 2048]) vgg_model_conv = VGGFace(include_top=False, 
input_shape=(224, 224, 3), pooling='avg') # pooling: None, avg or max # FC7 Features vgg_model = VGGFace() # pooling: None, avg or max out = vgg_model.get_layer('fc7').output vgg_model_fc7 = Model(vgg_model.input, out) feature_filename = '%s-feature-vggfaces-%d.p'%(image_path.split('/')[-2], no_of_images) if os.path.exists(feature_filename): with open(feature_filename, 'rb') as f: features, image_names = pickle.load(f) else: features = np.zeros([no_of_images, 4096]) for i, name in enumerate(image_names): img = image.load_img(name, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = utils.preprocess_input(x) print('image name: %s progress: %d/%d'%(name, i, no_of_images)) features[i, :] = vgg_model_fc7.predict(x) with open(feature_filename, 'wb') as f: pickle.dump((features, image_names), f) # use tsne to cluster images in 2 dimensions tsne = TSNE() reduced = tsne.fit_transform(features) reduced_transformed = reduced - np.min(reduced, axis=0) reduced_transformed /= np.max(reduced_transformed, axis=0) image_xindex_sorted = np.argsort(np.sum(reduced_transformed, axis=1)) # draw all images in a merged image merged_width = int(np.ceil(np.sqrt(no_of_images))*image_width) merged_image = np.zeros((merged_width, merged_width, 3), dtype='uint8') for counter, index in enumerate(image_xindex_sorted): # set location if ellipside: a = np.ceil(reduced_transformed[counter, 0] * (merged_width-image_width-1)+1) b = np.ceil(reduced_transformed[counter, 1] * (merged_width-image_width-1)+1) a = int(a - np.mod(a-1,image_width) + 1) b = int(b - np.mod(b-1,image_width) + 1) if merged_image[a,b,0] != 0: continue image_address = image_names[counter] img = np.asarray(Image.open(image_address).resize((image_width, image_width))) merged_image[a:a+image_width, b:b+image_width,:] = img[:,:,:3] else: b = int(np.mod(counter, np.sqrt(no_of_images))) a = int(np.mod(counter//np.sqrt(no_of_images), np.sqrt(no_of_images))) image_address = image_names[index] img = 
np.asarray(Image.open(image_address).resize((image_width, image_width))) merged_image[a*image_width:(a+1)*image_width, b*image_width:(b+1)*image_width,:] = img[:,:,:3] plt.imshow(merged_image) plt.show() merged_image = Image.fromarray(merged_image) if ellipside: merged_image.save('merged-%s-ellipsoide-inception.png'%image_path.split('/')[-2]) else: merged_image.save('merged-%s.png'%image_path.split('/')[-2]) a = np.array([[1,2], [2,6], [3,9], [9,3], [1,5]], dtype=float) print(a) normalized_a = a - np.min(a, axis=0) normalized_a /= np.max(normalized_a, axis=0) print(normalized_a) # reduced_transformed = reduced - np.min(reduced, axis=0) # reduced_transformed /= np.max(reduced_transformed, axis=0) # image_xindex_sorted = np.argsort(np.sum(reduced_transformed, axis=1)) ```
github_jupyter
# Udacity Machine Learning Nanodegree # Capstone Project Hello there! This is my capstone project on building a model to make better prosthetics. This project is for an open source prosthetic control system which would enable prosthetic devices to have multiple degrees of freedom. https://github.com/cyber-punk-me The system is built of several components. It connects a muscle activity (EMG, Electromyography) sensor to a user Android/Android Things App. The app collects data, then a server builds a model specifically for this user. After that the model can be downloaded and executed on the device to control motors or other appendages. This dataset can be used to map user residual muscle gestures to certain actions of a prosthetic such as open/close hand or rotate wrist. This document is divided into 4 parts: - Data Exploration - Data Preprocessing - Evaluating & Comparing Models - Model Tuning ## 1. Data Exploration In this section, we will look at the type of the data we are dealing with, and some visualizations that shall help us better understand the data we are working with. In addition, we shall load the data and process it into a manner suitable for performing the above operations. ### 1.1 Loading the data ``` # Import libraries necessary for this project import numpy as np import pandas as pd from time import time import seaborn as sns from IPython.display import display # Allows the use of display() for DataFrames import matplotlib.pyplot as plt import pandas from pandas.plotting import scatter_matrix ``` Our data is present in 4 separate files, one for each class. First, we shall load them all into dataframes and take a peek into each of the datasets. 
``` # read dataframes - header=None is given as we do not have the headers in the .csv files data0 = pd.read_csv("0.csv", header=None) data1 = pd.read_csv("1.csv", header=None) data2 = pd.read_csv("2.csv", header=None) data3 = pd.read_csv("3.csv", header=None) # Display the first record display(data0.head(n=1)) display(data1.head(n=1)) display(data2.head(n=1)) display(data3.head(n=1)) ``` As we can see above, the last column contains the category we are attempting to classify, i.e. the target variable. Now, we need to combine these 4 Dataframes that we have into 1 big Dataframe so we can visualize different features and work further with the data. The below code does that, and also shows us the shape of the resulting dataframe. ``` # append the dataframes into one unified dataset data = [data0, data1, data2, data3] data = pd.concat(data, sort=False) data.shape ``` Now, we take a small peek into the resulting dataset. ``` data.head(n=1) ``` Finally, since we concatenated the separate datasets earlier into our current dataset, this means that the data is arranged by the output variable in the order of concatenation. We need to shuffle the data so that our algorithms do not get too biased in any one way or the other. If we do not do this, in case we are using algorithms like K-fold validation to help our classifiers better model the data, the models will wind up not learning from the data properly as the uneven spread will be problematic. ``` # Shuffle the dataframe to randomize data = data.sample(frac=1) data.shape ``` ## 1.2 Data Visualization Now, we shall visualize the data to get a better understanding of how it is distributed. Before that, we split the data into independent variables X (or features) and the dependent variable y (target variable). ``` X = data.drop([64],axis=1) y = data[[64]] ``` Below, we plot histograms for each of the 64 features we have in our dataset. This will help us get a better understanding of how the data is distributed. 
``` %matplotlib inline fig = plt.figure(figsize = (15,20)) ax = fig.gca() X.hist(ax = ax) plt.show() ``` Now, we will plot a heatmap of the features in our dataset. From here, we can intuitively tell if correlations exist between the different features. ``` plt.figure(figsize=(15, 10)) sns.heatmap(X.corr()) plt.xticks(rotation=90) plt.yticks(rotation=0) ``` From the above, we can see that there is not much correlation between the different features in the datasets. This can indicate that doing PCA might not be of much help here, as there is not much correlation to draw off to make the eigenvectors that would be representative of the variance in the data. ## 1.3 Training and Testing Sets Below, we shall split the data into train and test sets. ``` # Train test split to get train and test sets from sklearn.model_selection import train_test_split # Split X_tr, X_test, y_tr, y_test = train_test_split(X, y, test_size=0.2, random_state=0) ``` ## 1.4 Benchmark Model ``` from sklearn import tree from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score clf = tree.DecisionTreeClassifier() clf.fit(X_tr, y_tr) y_pred = clf.predict(X_test) print("Accuracy \n") print(accuracy_score(y_pred, y_test)) print("\nF1 Score \n") print(f1_score(y_pred, y_test, average = 'micro')) ``` # 2. Data Preprocessing Here, we shall perform 2 major tasks: - Feature Scaling : As the data is sensor data, and is numerical data reported from different sensors. This means that some features could be disproportionately larger than the others. This would cause issues with the algorithm we’ll be using to model this problem. - Principal Component Analysis: As the data is collected for a whole hand, there are chances that some of the different features might be correlated to one another. This could also mean that the model would be affected. 
For PCA, we shall test and see how well the correlations come out to be, and if they are useful at all by testing the efficacy of an algorithm pre and post PCA. ## 2.0 Cleaning The Data Here, we shall check to see if there are any empty or null values in our dataset. In case there are, we can replace these by a representative statistic of our choice. (ex: mean, median etc.) ``` X.isnull().sum() ``` As we can see above, we do not have any null values in our dataset. This means that we can go ahead and use it straight away. ## 2.1 Feature Scaling We shall now perform feature scaling on our data. We shall also see a visualization similar to earlier. ``` # Feature Scaling from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() X = data.drop([64],axis=1) rescaled_X = scaler.fit_transform(X) rescaled_X = pd.DataFrame(rescaled_X) rescaled_X.head() X = rescaled_X import matplotlib.pyplot as plt import pandas from pandas.plotting import scatter_matrix %matplotlib inline fig = plt.figure(figsize = (15,20)) ax = fig.gca() X.hist(ax = ax) plt.show() ``` Now, we have scaled all of our features. The graphs above show that they are within the range of [0,1]. ## 2.2 Testing with a model before PCA Based on the data visualization seen above, we can see that the data is now in the range [0,1]. As we are unsure based on the heatmap we plotted earlier as to how well PCA will work, we will check how well the same classifier models the data before and after doing PCA. We shall now train a Random Forest Classifier on the this data. 
``` # Train test split to get train and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score clf = RandomForestClassifier(random_state=100, max_depth=7) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("Accuracy \n") print(accuracy_score(y_pred, y_test)) print("\nF1 Score \n") print(f1_score(y_pred, y_test, average = 'micro')) ``` So, as we can see above, we get an accuracy of 82% and a F1-score of about 82%. Let us continue by checking how this model performs on out testing set. ## 2.3 PCA Here, we shall be applying PCA onto the data, and checking how well PCA captures the variance of the data. ``` # PCA from sklearn.decomposition import PCA pca = PCA(n_components=32) pca.fit(X) print(pca.explained_variance_ratio_) print(sum(pca.explained_variance_ratio_)) ``` The feature with the highest variance in the data captures ~6% of the variance in the data, which is not a great result. This corroborates what we saw in the heatmap earlier - there is not much correlation between the features in this dataset. ``` x = X nx = pca.transform(X) X=pd.DataFrame(nx) y = data[[64]] ``` ## 2.4 Testing with a model after PCA So, as we can see above, the features obtained as a result of PCA do not seem to help in reducing the dimensionality of the data, nor do they seem to adequately capture the variance in our dataset. Let us continue by checking how the same Random Forest performs on the features obtained through PCA. 
``` # Train test split to get train and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score clf = RandomForestClassifier(random_state=42, max_depth=7) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("Accuracy \n") print(accuracy_score(y_pred, y_test)) print("\nF1 Score \n") print(f1_score(y_pred, y_test, average = 'micro')) ``` So, it looks like PCA did not help much after all in our case. In fact, it has reduced the accuracy of our classifier. In this scenario, it would be better to leave the cleaned data as it is and not perform PCA. ``` X = rescaled_X X.head() ``` We shall save the cleaned data here as we may be requiring it later. ``` # Train test split to get train and test sets from sklearn.model_selection import train_test_split # First time X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) SaveX_train = X_train SaveX_test = X_test Savey_train = y_train Savey_test = y_test ``` # 3. Evaluating and Comparing Models Now, we shall proceed to take several different classifiers, and use these to model the data. Then we shall compare how well they perform on the data. We shall be checking with the following models: - Logistic Regression - Decision Trees - Random Forests - AdaBoost For the above models, we can use K-fold cross validation to make sure we do not waste any of the training set on cross-validation, which will help our models learn better as they will have a larger training set available to them. We shall also experiment with a Deep Learning Multi Layer Perceptron (MLP) and see how well this performs. To compare the models, we shall use 2 metrics: - Accuracy - F1 Score ## 3.1 Model Comparison We are not sure which models will perform well. 
So, we shall try and compare a number of models to see which one performs the best. ``` from sklearn import model_selection from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn import metrics scoring = ['accuracy', 'f1_macro'] models = [] models.append(('1. Logistic Regression', LogisticRegression())) models.append(('2. Decision Tree', DecisionTreeClassifier())) models.append(('3. Random Forest', RandomForestClassifier(n_estimators=100))) models.append(('4. AdaBoost', AdaBoostClassifier(RandomForestClassifier(n_estimators=100)))) op = "" for name, model in models: kfold = model_selection.KFold(n_splits = 4, random_state = 47, shuffle=True) cv_results = model_selection.cross_validate(model, X_train, y_train, cv = kfold, scoring=scoring, return_train_score=True ) print(name+"\nThe accuracy and F1 score are:\n") op+=name+"\nThe accuracy and F1 score are:\n" for met in scoring: key = 'test_'+met print(np.mean(cv_results[key])) op=op+str(np.mean(cv_results[key]))+"\n" print(op) ``` ## 3.2 MLP Classifier Below, we shall create a Multi Layer Perceptron (MLP) with 3 hidden layers that make use of the ReLu activation function, and with multiple dropout layers. We will one-hot encode the target variable so we can create an output layer for the MLP with 4 nodes that use the softmax activation function. In addition, we shall be splitting the data twice, to get the training, cross-validation and testing sets. 
``` from keras.models import Sequential from keras.layers import Dense, Dropout, Activation X = data.drop([64],axis=1) y = data[[64]] from sklearn.preprocessing import OneHotEncoder enc = OneHotEncoder(handle_unknown='ignore') enc.fit(y) enc.categories_ y2=enc.transform(y) y2 = pd.DataFrame(y2.todense()) y2.head() # Train test split twice to get train, cross validation and test sets from sklearn.model_selection import train_test_split # First time X_train, X_test, y_train, y_test = train_test_split(X, y2, test_size=0.2, random_state=0) # Second time X_train, X_crossval, y_train, y_crossval = train_test_split(X_train, y_train, test_size=0.25, random_state=0) # Build the model architecture model = Sequential() model.add(Dense(32, activation="relu", input_shape=(64,))) model.add(Dropout(0.25)) model.add(Dense(16, activation="relu")) model.add(Dropout(.2)) model.add(Dense(8, activation="relu")) model.add(Dropout(.1)) model.add(Dense(4, activation="softmax")) # Compile the model using a loss function and an optimizer. model.compile(loss = "categorical_crossentropy", optimizer='adam', metrics=['accuracy']) model.summary() model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) from keras.callbacks import ModelCheckpoint epochs = 50 checkpointer = ModelCheckpoint(filepath='weights.best.from_scratch.hdf5', verbose=1, save_best_only=True) model.fit(X_train, y_train, validation_data=(X_crossval, y_crossval), epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1) y_pred = model.predict(X_test) print(y_pred) print("Accuracy \n") print(accuracy_score(y_pred.round(), y_test)) print("\nF1 Score \n") print(f1_score(y_pred.round(), y_test, average = 'micro')) ``` So, now we have trained several models. Taking into account that the neural networks take much longer to train and model the data, and that the metrics in the case of the AdaBoost are higher, we shall be picking AdaBoost as our classifier to model this problem. ## 4. 
Model Tuning So, we have decided on using Adaboost to model our problem. We shall proceed and optimize it further so we can improve its metrics of prediction. ## 4.1 Pre-tuning metrics We take our cleaned data that we used earlier when we were comparing Models. ``` X_train = SaveX_train X_test = SaveX_test y_train = Savey_train y_test = Savey_test clf = AdaBoostClassifier(RandomForestClassifier(n_estimators=100)) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("Accuracy \n") print(accuracy_score(y_pred.round(), y_test)) print("\nF1 Score \n") print(f1_score(y_pred, y_test, average = 'micro')) print("Confusion Matrix: \n",confusion_matrix(y_test, y_pred)) print("Classification Report: \n",classification_report(y_test, y_pred)) ``` ## 4.2 Tuning Now, we shall proceed to use Grid Search to optimize our Adaboost classifier with different hyperparameters. ``` from sklearn import model_selection from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn import metrics from sklearn.metrics import make_scorer from sklearn.model_selection import GridSearchCV clf = AdaBoostClassifier(RandomForestClassifier(n_estimators=100)) clf.fit(X_train, y_train) param_dist = { 'n_estimators': [50, 100], 'learning_rate' : [0.01,0.05,0.1,0.3,1, 3, 5, 10] } scorer = make_scorer(accuracy_score) gridsearch = GridSearchCV(clf, param_dist, scoring=scorer) gridsearch.fit(X_train, y_train) print(gridsearch.best_params_) print("===========================================================================================") print(gridsearch.best_score_) ``` Finally, we shall check the final scores achieved by our optimized model. 
``` tuned_clf = AdaBoostClassifier(RandomForestClassifier(n_estimators=100), learning_rate=0.1) tuned_clf.fit(X_train, y_train) y_pred = tuned_clf.predict(X_test) print("Accuracy \n") print(accuracy_score(y_pred.round(), y_test)) print("\nF1 Score \n") print(f1_score(y_pred, y_test, average = 'micro')) print("Confusion Matrix: \n",confusion_matrix(y_test, y_pred)) print("Classification Report: \n",classification_report(y_test, y_pred)) #plot graph of feature importances fig = plt.figure(figsize = (15,20)) feat_importances = pd.Series(tuned_clf.feature_importances_, index=X.columns) feat_importances.plot(kind='barh') plt.show() ```
github_jupyter
# Using Twitter API with Tweepy To interface with the Twitter API, we can use a third-party package such as Tweepy. To use the package, we will need to register and get keys from the Twitter developer portal. Then, we use these keys to authenticate with OAuth2 to access the Twitter API. ``` import tweepy import pandas as pd import pytz import yaml, os ``` You can either create a config file 'twitter-config.yml' (in yaml format). Here is an example of the content in the config file <br> ```yaml consumer_key: 'XXXXXXXX' consumer_secret: 'XXXXXXXX' access_token_key: 'XXXXXXXX' access_token_secret: 'XXXXXXXX' ``` or change the key/token values in the else section below. ``` config_file = 'twitter-config.yml' if os.path.isfile(config_file): config = yaml.safe_load(open(config_file)) else: config = { 'consumer_key': 'XXXXXXXX', 'consumer_secret': 'XXXXXXXX', 'access_token_key': 'XXXXXXXX', 'access_token_secret': 'XXXXXXXX' } auth = tweepy.OAuthHandler(config['consumer_key'], config['consumer_secret']) auth.set_access_token(config['access_token_key'], config['access_token_secret']) api = tweepy.API(auth, wait_on_rate_limit=True) ``` Tweepy provides many features: - searching and listing users' information - reading tweets from user timelines - creating, fetching, retweeting tweets - managing followers - adding and removing likes - blocking users - searching and filtering tweets - listing trends - streaming tweets in real-time Let's create a helper function to print a tweet. The function prints only one line containing the timestamp (in BKK timezone), the user who created the tweet, and the content of the tweet. 
``` def print_tweet(tweet): # tweet.created_at is an "unaware" timezone, but it acutally is a UTC timezone # we will have to make it a UTC first, then convert it to bkk utc_dt = tweet.created_at.replace(tzinfo=pytz.UTC) timezone_bkk = pytz.timezone('Asia/Bangkok') bkk_dt = utc_dt.astimezone(timezone_bkk) print('{} [{}] {}'.format(bkk_dt, tweet.user.name, tweet.text[:50])) ``` ## Getting user's information With get_user method, we can access user's public information such as screen_name, description, followers' count, etc. Please refer to [Python – User object in Tweepy](https://www.geeksforgeeks.org/python-user-object-in-tweepy/) for more details of user object. ``` user = api.get_user(screen_name='katyperry') print('Name:', user.screen_name) print('Description:', user.description) print('Number of Followers:', user.followers_count) print('Number of Followings:', user.friends_count) print('Number of Tweets/Retweets:', user.statuses_count) print('Headshot\'s URL', user.profile_image_url) ``` ## Accessing user timelines We can get tweets, replies, and mentions in user's timeline, as long as it is public. ``` timeline = api.user_timeline(user_id=user.id) for tweet in timeline: print_tweet(tweet) ``` ## Listing Trends Trends are location-oriented. We will have to use location coordinate to get trending information. 
``` # Let's use BKK location lat = 13.739060668870644 long = 100.53214799610562 location_info = api.closest_trends(lat, long) location_info ``` Get trending keywords and put them in dataframe ``` trendings = api.get_place_trends(location_info[0]['woeid']) df = pd.DataFrame(trendings[0]['trends']) # sort trending based on their tweet volume in descending order trending_df = df.sort_values('tweet_volume', ascending=False) trending_df # get the keyword with the highest tweet volume top_trending_keyword = trending_df.head(1).iloc[0]['name'] print('Top Trending Keyword = ', top_trending_keyword) for tweet in api.search_tweets(q=top_trending_keyword, count=10): print_tweet(tweet) ``` ## Understanding cursor For Twitter API, those information will be returned with some certain limitations e.g. providing only 20 followers. If you want more complete list, we will have to use Cursor API. Cursor API is basically a paging mechanism. ``` user_id = 'natawutn' user = api.get_user(screen_name=user_id) print('Name:', user.screen_name) print('Number of Followers:', user.followers_count) print('Number of Followings:', user.friends_count) print('Number of Tweets/Retweets:', user.statuses_count) for friend in user.friends(): print('{:15.15} -- {:40.40}'.format(friend.screen_name, friend.description)) # Iterate through all of the account friends (but not more than 30) counter = 0 for id in tweepy.Cursor(api.get_friend_ids, screen_name=user_id).items(): # Process the friend here friend = api.get_user(user_id=id) print('{:15.15} -- {:40.40}'.format(friend.screen_name, friend.description)) counter += 1 if counter >= 30: break ```
github_jupyter
# Nu-Support Vector Classification with RobustScaler ### Required Packages ``` !pip install imblearn import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se import warnings from sklearn.model_selection import train_test_split from imblearn.over_sampling import RandomOverSampler from sklearn.svm import NuSVC from sklearn.preprocessing import LabelEncoder,RobustScaler from sklearn.pipeline import make_pipeline from sklearn.metrics import classification_report,plot_confusion_matrix warnings.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= " " ``` List of features which are required for model training. ``` #x_values features=[ ] ``` Target variable for prediction. ``` #y_value target=" " ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file using its storage path. And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` X=df[features] Y=df[target] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The below snippet has functions which remove null values if any exist, and convert the string classes data in the datasets by encoding them to integer classes. 
``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) def EncodeY(df): if len(df.unique())<=2: return df else: un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort') df=LabelEncoder().fit_transform(df) EncodedT=[xi for xi in range(len(un_EncodedT))] print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT)) return df ``` Calling preprocessing functions on the feature and target set. ``` x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=EncodeY(NullClearner(Y)) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` #### Distribution Of Target Variable ``` plt.figure(figsize = (10,6)) se.countplot(Y) ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) ``` #### Handling Target Imbalance The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important. One approach to addressing imbalanced datasets is to oversample the minority class. 
The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library. ``` x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train) ``` ### Model Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection. A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupies either side. SVC and NuSVC are similar methods, but accept slightly different sets of parameters and have different mathematical formulations. * #### Model Tuning Parameters > - nu -> An upper bound on the fraction of margin errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. > - kernel -> Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’ will be used. If a callable is given it is used to pre-compute the kernel matrix from data matrices; that matrix should be an array of shape (n_samples, n_samples). > - gamma -> Gamma is a hyperparameter that we have to set before training the model. Gamma decides how much curvature we want in a decision boundary. > - degree -> Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. Using degree 1 is similar to using a linear kernel. Also, increasing the degree parameter leads to higher training times. ### Robust Scaler Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. 
However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. The Robust Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). ``` model=make_pipeline(RobustScaler(),NuSVC(random_state=123)) model.fit(x_train,y_train) ``` #### Model Accuracy score() method return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. ``` print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ``` #### Confusion Matrix A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. ``` plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues) ``` #### Classification Report A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. * where: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset. ``` print(classification_report(y_test,model.predict(x_test))) ``` #### Creator: Ageer Hari krishna , Github: [Profile](https://github.com/ageerHarikrishna)
github_jupyter
## Keywords using RAKE We will use the RAKE algorithm (A Python implementation of the Rapid Automatic Keyword Extraction (RAKE) algorithm as described in: Rose, S., Engel, D., Cramer, N., & Cowley, W. (2010). Automatic Keyword Extraction from Individual Documents. In M. W. Berry & J. Kogan (Eds.), Text Mining: Theory and Applications: John Wiley & Sons.). Idea of RAKE is as follows (for each file). * Split the text into sentences. * Split each sentence into phrases, separated by stopwords. * Score words in each phrase. * Order phrases by score. * (Addition) filter out 2-3 word phrases. * (Addition) retain only top scoring phrases. Finally, run this across all files and retain the most frequent keywords. ``` import matplotlib.pyplot as plt import operator import os import nltk import re import spacy import sqlite3 import string from nltk.corpus import stopwords %matplotlib inline DATA_DIR = "../data" TEXTFILES_DIR = os.path.join(DATA_DIR, "textfiles") RAKE_KEYWORDS = os.path.join(DATA_DIR, "rake_keywords.tsv") WORDCOUNTS_DB = os.path.join(DATA_DIR, "wordcounts.db") CANDIDATE_RAKE = os.path.join(DATA_DIR, "candidate_rake.tsv") ``` ### Apply RAKE to single file Very heavily based on [code from aneesha/RAKE](https://github.com/aneesha/RAKE/blob/master/rake.py), main difference is that we are using SpaCy for sentence tokenization. ### Extract stopwords from NLTK NLTK provides a list of English stopwords which we will use here to build a regular expression that will be used to split sentences into phrases. 
``` def build_stop_word_regex(): stop_word_list = stopwords.words("english") stop_word_regex_list = [] for word in stop_word_list: word_regex = r'\b' + word + r'(?![\w-])' # added look ahead for hyphen stop_word_regex_list.append(word_regex) stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE) return stop_word_pattern stop_word_pattern = build_stop_word_regex() print(stop_word_pattern) ``` ### Split text to sentences ``` nlp = spacy.load("en") def split_sentences(filename, show_debug=False): fin = open(filename, "r") text = fin.read() fin.close() sentence_list = [] doc = nlp(text) i = 0 for sent in doc.sents: if show_debug and i % 100 == 0: print("{:d} sentences added".format(i)) # tokens include whitespace separator sentence = " ".join(token.string for token in sent) sentence = re.sub("\n", " ", sentence) # squeeze out additional whitespace sentence = re.sub("\s+", " ", sentence) sentence_list.append(sentence) i += 1 if show_debug: print("{:d} sentences added, COMPLETE".format(i)) return sentence_list sentence_list = split_sentences(os.path.join(TEXTFILES_DIR, "1.txt"), True) ``` ### Sentences to Phrases Phrases are "runs of words" delimited by stopwords. ``` def generate_phrases(sentence_list, stopword_pattern): phrase_list = [] for s in sentence_list: tmp = re.sub(stopword_pattern, '|', s.strip()) phrases = tmp.split("|") for phrase in phrases: phrase = phrase.strip().lower() if phrase != "": phrase_list.append(phrase) return phrase_list phrase_list = generate_phrases(sentence_list, stop_word_pattern) print("{:d} phrases".format(len(phrase_list))) ``` ### Calculate word scores Word scores are generated based on their frequency and degree. ``` def is_number(s): try: float(s) if '.' 
in s else int(s) return True except ValueError: return False def separate_words(text, min_word_return_size): splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]') words = [] for single_word in splitter.split(text): current_word = single_word.strip().lower() # leave numbers in phrase, but don't count as words, # since they tend to invalidate scores of their phrases if (len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word)): words.append(current_word) return words def calculate_word_scores(phrase_list): word_frequency = {} word_degree = {} for phrase in phrase_list: word_list = separate_words(phrase, 0) word_list_length = len(word_list) word_list_degree = word_list_length - 1 #if word_list_degree > 3: word_list_degree = 3 #exp. for word in word_list: word_frequency.setdefault(word, 0) word_frequency[word] += 1 word_degree.setdefault(word, 0) word_degree[word] += word_list_degree #orig. #word_degree[word] += 1/(word_list_length*1.0) #exp. for item in word_frequency: word_degree[item] = word_degree[item] + word_frequency[item] # Calculate Word scores = deg(w)/frew(w) word_score = {} for item in word_frequency: word_score.setdefault(item, 0) word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig. #word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp. return word_score word_scores = calculate_word_scores(phrase_list) print(len(word_scores)) ``` ### Assign phrase scores based on word scores Finally, filter candidates generated using some heuristics. 
``` def generate_candidate_keyword_scores(phrase_list, word_scores): keyword_candidates = {} for phrase in phrase_list: keyword_candidates.setdefault(phrase, 0) word_list = separate_words(phrase, 0) candidate_score = 0 for word in word_list: candidate_score += word_scores[word] keyword_candidates[phrase] = candidate_score return keyword_candidates candidate_keywords_scored = generate_candidate_keyword_scores(phrase_list, word_scores) len(candidate_keywords_scored) def filter_sort_top_keywords(candidate_keywords_scored, min_words=2, max_words=3, score_cutoff=0.0): top_keywords = sorted(candidate_keywords_scored.items(), key=operator.itemgetter(1), reverse=True) already_seen = set() candidate_keywords = [] for top_keyword, score in top_keywords: if score <= score_cutoff: break top_keyword = re.sub("\s+", " ", top_keyword) words = top_keyword.split(" ") # only keep 2 and 3 word keywords num_words = len(words) if num_words < min_words or num_words > max_words: continue # remove any single char words words_filtered = [w for w in words if len(w) > 1] top_keyword = " ".join(words_filtered) # remove extra punctuation chars top_keyword = "".join([c for c in top_keyword if c not in string.punctuation]) top_keyword = re.sub("\s+", " ", top_keyword) top_keyword = top_keyword.strip() # don't repeat keywords if top_keyword in already_seen: continue candidate_keywords.append((top_keyword, score)) already_seen.add(top_keyword) return candidate_keywords candidate_keywords = filter_sort_top_keywords(candidate_keywords_scored) i = 0 for keyword, score in candidate_keywords: if i <= 10: print("{:6.3f}".format(score), keyword) i += 1 ``` ### Plot results to determine cutoff We can run this multiple times to generate a decent cutoff. Based on running these scores across a few files, we determine that a good cutoff is 5. 
``` plt.plot([k[1] for k in candidate_keywords]) plt.xlabel("number of keywords") plt.ylabel("score") ``` ### Apply code to all files in corpus We will rerun the above code for all the files in our corpus and generate a file of raw keywords generated by RAKE. ``` if not os.path.exists(RAKE_KEYWORDS): frak = open(RAKE_KEYWORDS, "w") i = 0 for filename in os.listdir(TEXTFILES_DIR): if i % 100 == 0: print("{:d} files processed".format(i)) doc_id = int(filename.split(".")[0]) textfile_name = os.path.join(TEXTFILES_DIR, filename) sentence_list = split_sentences(textfile_name) phrase_list = generate_phrases(sentence_list, stop_word_pattern) word_scores = calculate_word_scores(phrase_list) candidate_keywords_scored = generate_candidate_keyword_scores(phrase_list, word_scores) candidate_keywords = filter_sort_top_keywords(candidate_keywords_scored, score_cutoff=5.0) for keyword, score in candidate_keywords: frak.write("{:d}\t{:s}\t{:.3f}\n".format(doc_id, keyword, score)) i += 1 print("{:d} files processed, COMPLETE".format(i)) frak.close() ``` ### Load into DB for grouping ``` def table_exists(conn, table_name): cur = conn.cursor() cur.execute("select name from sqlite_master where type='table' and name = ?", [table_name]) rows = cur.fetchall() cur.close() return len(rows) > 0 def create_rake_table(conn): if not table_exists(conn, "rake"): cur = conn.cursor() create_table = """create table rake( id INTEGER NOT NULL, keyword VARCHAR(50) NOT NULL, doc_id INTEGER NOT NULL) """ cur.execute(create_table) cur.close() def index_exists(conn, index_name): cur = conn.cursor() cur.execute("select name from sqlite_master where type='index' and name = ?", [index_name]) rows = cur.fetchall() cur.close() return len(rows) > 0 def create_rake_indexes(conn): cur = conn.cursor() index_names = ["ix_rake", "ax1_rake", "ax2_rake"] create_indexes = [ "create unique index ix_rake on rake(id)", "create index ax1_rake on rake(keyword)", "create index ax2_rake on rake(doc_id)" ] for index_name, 
create_index in zip(index_names, create_indexes): if not index_exists(conn, index_name): cur.execute(create_index) cur.close() def insert_keyword(conn, id, keyword, doc_id, commit=False): cur = conn.cursor() cur.execute("insert into rake(id, keyword, doc_id) values (?, ?, ?)", [id, keyword, doc_id]) if commit: conn.commit() cur.close() def count_words_in_table(conn): cur = conn.cursor() cur.execute("select count(*) as cnt from rake") rows = cur.fetchone() return int(rows[0]) cur.close() conn = sqlite3.connect(WORDCOUNTS_DB) create_rake_table(conn) num_keywords = count_words_in_table(conn) keyword_id = 0 should_commit = False if num_keywords == 0: frake = open(RAKE_KEYWORDS, "r") for line in frake: if keyword_id % 100 == 0: print("{:d} records inserted".format(keyword_id)) should_commit = True doc_id, keyword, _ = line.strip().split("\t") if len(keyword.strip()) == 0: continue insert_keyword(conn, keyword_id, keyword, doc_id, should_commit) should_commit = False keyword_id += 1 frake.close() print("{:d} records inserted, COMPLETE".format(keyword_id)) conn.commit() create_rake_indexes(conn) cur = conn.cursor() cur.execute(""" select keyword, count(*) as freq from rake group by keyword having freq > 5 order by freq desc """) candidate_keywords = [] rows = cur.fetchall() i = 0 for keyword, freq in rows: words = keyword.split(" ") num_words = len(words) if num_words < 2: # for search, we don't care about unigrams continue words_filtered = [w for w in words if len(w.strip()) > 0] if num_words < 2: continue candidate_keywords.append((keyword, freq)) if i <= 10: print(keyword, freq) i += 1 ``` ### Plot frequency distribution to find cutoff ``` plt.plot([ck[1] for ck in candidate_keywords]) plt.xlabel("keywords") plt.ylabel("frequency") ``` ### Write out top 2500 keywords ``` fcrk = open(CANDIDATE_RAKE, "w") for keyword, freq in candidate_keywords[0:2500]: fcrk.write("{:s}\t{:d}\n".format(keyword, freq)) fcrk.close() ```
github_jupyter
# <div style="text-align: center"> Santander ML Explainability </div> ### <div style="text-align: center">CLEAR DATA. MADE MODEL. </div> <img src='https://galeria.bankier.pl/p/b/5/215103d7ace468-645-387-261-168-1786-1072.jpg' width=600 height=600> <div style="text-align:center"> last update: <b> 10/03/2019</b></div> You can Fork code and Follow me on: > ###### [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist) > ###### [Kaggle](https://www.kaggle.com/mjbahmani/) ------------------------------------------------------------------------------------------------------------- <b>I hope you find this kernel helpful and some <font color='red'>UPVOTES</font> would be very much appreciated.</b> ----------- <a id="top"></a> <br> ## Notebook Content 1. [Introduction](#1) 1. [Load packages](#2) 1. [import](21) 1. [Setup](22) 1. [Version](23) 1. [Problem Definition](#3) 1. [Problem Feature](#31) 1. [Aim](#32) 1. [Variables](#33) 1. [Evaluation](#34) 1. [Exploratory Data Analysis(EDA)](#4) 1. [Data Collection](#41) 1. [Visualization](#42) 1. [Data Preprocessing](#43) 1. [Machine Learning Explainability for Santander](#5) 1. [Permutation Importance](#51) 1. [How to calculate and show importances?](#52) 1. [What can be inferred from the above?](#53) 1. [Partial Dependence Plots](#54) 1. [Model Development](#6) 1. [lightgbm](#61) 1. [RandomForestClassifier](#62) 1. [DecisionTreeClassifier](#63) 1. [CatBoostClassifier](#64) 1. [Funny Combine](#65) 1. [References](#7) <a id="1"></a> <br> ## 1- Introduction At [Santander](https://www.santanderbank.com) their mission is to help people and businesses prosper. they are always looking for ways to help our customers understand their financial health and identify which products and services might help them achieve their monetary goals. 
<img src='https://www.smava.de/kredit/wp-content/uploads/2015/12/santander-bank.png' width=400 height=400> In this kernel we are going to create a **Machine Learning Explainability** for **Santander** based this perfect [course](https://www.kaggle.com/learn/machine-learning-explainability) in kaggle. ><font color="red"><b>Note: </b></font> how to extract **insights** from models? <a id="2"></a> <br> ## 2- A Data Science Workflow for Santander Of course, the same solution can not be provided for all problems, so the best way is to create a **general framework** and adapt it to new problem. **You can see my workflow in the below image** : <img src="http://s8.picofile.com/file/8342707700/workflow2.png" /> **You should feel free to adjust this checklist to your needs** ###### [Go to top](#top) <a id="2"></a> <br> ## 2- Load packages <a id="21"></a> <br> ## 2-1 Import ``` from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from catboost import CatBoostClassifier,Pool from IPython.display import display import matplotlib.patches as patch import matplotlib.pyplot as plt from sklearn.svm import NuSVR from scipy.stats import norm from sklearn import svm import lightgbm as lgb import xgboost as xgb import seaborn as sns import pandas as pd import numpy as np import warnings import time import glob import sys import os import gc ``` <a id="22"></a> <br> ## 2-2 Setup ``` # for get better result chage fold_n to 5 fold_n=5 folds = StratifiedKFold(n_splits=fold_n, shuffle=True, random_state=10) %matplotlib inline %precision 4 warnings.filterwarnings('ignore') plt.style.use('ggplot') np.set_printoptions(suppress=True) pd.set_option("display.precision", 15) ``` <a id="23"></a> <br> ## 2-3 Version ``` print('pandas: {}'.format(pd.__version__)) print('numpy: {}'.format(np.__version__)) print('Python: {}'.format(sys.version)) ``` <a 
id="3"></a> <br> ## 3- Problem Definition In this **challenge**, we should help this **bank** identify which **customers** will make a **specific transaction** in the future, irrespective of the amount of money transacted. The data provided for this competition has the same structure as the real data we have available to solve this **problem**. <a id="31"></a> ### 3-1 Problem Feature 1. train.csv - the training set. 1. test.csv - the test set. The test set contains some rows which are not included in scoring. 1. sample_submission.csv - a sample submission file in the correct format. <a id="32"></a> ### 3-2 Aim In this competition, The task is to predict the value of **target** column in the test set. <a id="33"></a> ### 3-3 Variables We are provided with an **anonymized dataset containing numeric feature variables**, the binary **target** column, and a string **ID_code** column. The task is to predict the value of **target column** in the test set. <a id="34"></a> ## 3-4 evaluation **Submissions** are evaluated on area under the [ROC curve](http://en.wikipedia.org/wiki/Receiver_operating_characteristic) between the predicted probability and the observed target. <img src='https://upload.wikimedia.org/wikipedia/commons/6/6b/Roccurves.png' width=300 height=300> ``` from sklearn.metrics import roc_auc_score, roc_curve ``` <a id="4"></a> ## 4- Exploratory Data Analysis(EDA) In this section, we'll analysis how to use graphical and numerical techniques to begin uncovering the structure of your data. 
* Data Collection * Visualization * Data Preprocessing * Data Cleaning <img src="http://s9.picofile.com/file/8338476134/EDA.png" width=400 height=400> <a id="41"></a> <br> ## 4-1 Data Collection ``` print(os.listdir("../input/")) # import Dataset to play with it train= pd.read_csv("../input/train.csv") test = pd.read_csv('../input/test.csv') sample_submission = pd.read_csv('../input/sample_submission.csv') sample_submission.head() train.shape, test.shape, sample_submission.shape train.head(5) ``` # Reducing memory size by ~50% Because we make a lot of calculations in this kernel, we'd better reduce the size of the data. 1. 300 MB before Reducing 1. 150 MB after Reducing ``` #Based on this great kernel https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65 def reduce_mem_usage(df): start_mem_usg = df.memory_usage().sum() / 1024**2 print("Memory usage of properties dataframe is :",start_mem_usg," MB") NAlist = [] # Keeps track of columns that have missing values filled in. for col in df.columns: if df[col].dtype != object: # Exclude strings # Print current column type print("******************************") print("Column: ",col) print("dtype before: ",df[col].dtype) # make variables for Int, max and min IsInt = False mx = df[col].max() mn = df[col].min() # Integer does not support NA, therefore, NA needs to be filled if not np.isfinite(df[col]).all(): NAlist.append(col) df[col].fillna(mn-1,inplace=True) # test if column can be converted to an integer asint = df[col].fillna(0).astype(np.int64) result = (df[col] - asint) result = result.sum() if result > -0.01 and result < 0.01: IsInt = True # Make Integer/unsigned Integer datatypes if IsInt: if mn >= 0: if mx < 255: df[col] = df[col].astype(np.uint8) elif mx < 65535: df[col] = df[col].astype(np.uint16) elif mx < 4294967295: df[col] = df[col].astype(np.uint32) else: df[col] = df[col].astype(np.uint64) else: if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif 
mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) # Make float datatypes 32 bit else: df[col] = df[col].astype(np.float32) # Print new column type print("dtype after: ",df[col].dtype) print("******************************") # Print final result print("___MEMORY USAGE AFTER COMPLETION:___") mem_usg = df.memory_usage().sum() / 1024**2 print("Memory usage is: ",mem_usg," MB") print("This is ",100*mem_usg/start_mem_usg,"% of the initial size") return df, NAlist ``` Reducing for train data set ``` train, NAlist = reduce_mem_usage(train) print("_________________") print("") print("Warning: the following columns have missing values filled with 'df['column_name'].min() -1': ") print("_________________") print("") print(NAlist) ``` Reducing for test data set ``` test, NAlist = reduce_mem_usage(test) print("_________________") print("") print("Warning: the following columns have missing values filled with 'df['column_name'].min() -1': ") print("_________________") print("") print(NAlist) ``` <a id="41"></a> <br> ## 4-1-1Data set fields ``` train.columns print(len(train.columns)) print(train.info()) ``` <a id="422"></a> <br> ## 4-2-2 numerical values Describe ``` train.describe() ``` <a id="42"></a> <br> ## 4-2 Visualization <a id="421"></a> ## 4-2-1 hist ``` train['target'].value_counts().plot.bar(); f,ax=plt.subplots(1,2,figsize=(20,10)) train[train['target']==0].var_0.plot.hist(ax=ax[0],bins=20,edgecolor='black',color='red') ax[0].set_title('target= 0') x1=list(range(0,85,5)) ax[0].set_xticks(x1) train[train['target']==1].var_0.plot.hist(ax=ax[1],color='green',bins=20,edgecolor='black') ax[1].set_title('target= 1') x2=list(range(0,85,5)) ax[1].set_xticks(x2) plt.show() ``` <a id="422"></a> <br> ## 4-2-2 Mean Frequency ``` 
train[train.columns[2:]].mean().plot('hist');plt.title('Mean Frequency'); ``` <a id="423"></a> ## 4-2-3 countplot ``` f,ax=plt.subplots(1,2,figsize=(18,8)) train['target'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True) ax[0].set_title('target') ax[0].set_ylabel('') sns.countplot('target',data=train,ax=ax[1]) ax[1].set_title('target') plt.show() ``` <a id="424"></a> ## 4-2-4 hist If you check the histogram for all features, you will find that most of them are very similar ``` train["var_0"].hist(); train["var_81"].hist(); train["var_2"].hist(); ``` <a id="426"></a> ## 4-2-6 distplot The target in the data set is **imbalanced** ``` sns.set(rc={'figure.figsize':(9,7)}) sns.distplot(train['target']); ``` <a id="427"></a> ## 4-2-7 violinplot ``` sns.violinplot(data=train,x="target", y="var_0") sns.violinplot(data=train,x="target", y="var_81") ``` <a id="43"></a> <br> ## 4-3 Data Preprocessing Before we start this section, let me introduce some other competitions that were similar to this one: 1. https://www.kaggle.com/artgor/how-to-not-overfit 1. https://www.kaggle.com/c/home-credit-default-risk 1. https://www.kaggle.com/c/porto-seguro-safe-driver-prediction <a id="431"></a> <br> ## 4-3-1 Check missing data for test & train ``` def check_missing_data(df): flag=df.isna().sum().any() if flag==True: total = df.isnull().sum() percent = (df.isnull().sum())/(df.isnull().count()*100) output = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) data_type = [] # written by MJ Bahmani for col in df.columns: dtype = str(df[col].dtype) data_type.append(dtype) output['Types'] = data_type return(np.transpose(output)) else: return(False) check_missing_data(train) check_missing_data(test) ``` <a id="432"></a> <br> ## 4-3-2 Binary Classification ``` train['target'].unique() ``` <a id="433"></a> <br> ## 4-3-3 Is the data set imbalanced? 
A large part of the data is unbalanced, but **how can we solve it?** ``` train['target'].value_counts() def check_balance(df,target): check=[] # written by MJ Bahmani for binary target print('size of data is:',df.shape[0] ) for i in [0,1]: print('for target {} ='.format(i)) print(df[target].value_counts()[i]/df.shape[0]*100,'%') ``` 1. **Imbalanced dataset** is relevant primarily in the context of supervised machine learning involving two or more classes. 1. **Imbalance** means that the number of data points available for the different classes is different <img src='https://www.datascience.com/hs-fs/hubfs/imbdata.png?t=1542328336307&width=487&name=imbdata.png'> [Image source](http://api.ning.com/files/vvHEZw33BGqEUW8aBYm4epYJWOfSeUBPVQAsgz7aWaNe0pmDBsjgggBxsyq*8VU1FdBshuTDdL2-bp2ALs0E-0kpCV5kVdwu/imbdata.png) ``` check_balance(train,'target') ``` ## 4-3-4 skewness and kurtosis ``` #skewness and kurtosis print("Skewness: %f" % train['target'].skew()) print("Kurtosis: %f" % train['target'].kurt()) ``` <a id="5"></a> <br> # 5- Machine Learning Explainability for Santander In this section, I want to try to extract **insights** from models with the help of this excellent [**Course**](https://www.kaggle.com/learn/machine-learning-explainability) in Kaggle. The goal behind ML Explainability for Santander is: 1. All features are senselessly named (var_1, var_2, ...) but certainly the importance of each one is different! 1. Extract insights from models. 1. Find the most important feature in models. 1. Effect of each feature on the model's predictions. <img src='http://s8.picofile.com/file/8353215168/ML_Explain.png'> As you can see from the above, we will refer to three important and practical concepts in this section and try to explain each of them in detail. <a id="51"></a> <br> ## 5-1 Permutation Importance In this section we will answer the following questions: 1. What features have the biggest impact on predictions? 1. How to extract insights from models? 
### Prepare our data for our model ``` cols=["target","ID_code"] X = train.drop(cols,axis=1) y = train["target"] X_test = test.drop("ID_code",axis=1) ``` ### Create a sample model to calculate which feature are more important. ``` train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) rfc_model = RandomForestClassifier(random_state=0).fit(train_X, train_y) ``` <a id="52"></a> <br> ## 5-2 How to calculate and show importances? ### Here is how to calculate and show importances with the [eli5](https://eli5.readthedocs.io/en/latest/) library: ``` import eli5 from eli5.sklearn import PermutationImportance perm = PermutationImportance(rfc_model, random_state=1).fit(val_X, val_y) eli5.show_weights(perm, feature_names = val_X.columns.tolist(), top=150) ``` <a id="53"></a> <br> ## 5-3 What can be inferred from the above? 1. As you move down the top of the graph, the importance of the feature decreases. 1. The features that are shown in green indicate that they have a positive impact on our prediction 1. The features that are shown in white indicate that they have no effect on our prediction 1. The features shown in red indicate that they have a negative impact on our prediction 1. The most important feature was **Var_110**. <a id="54"></a> <br> ## 5-4 Partial Dependence Plots While **feature importance** shows what **variables** most affect predictions, **partial dependence** plots show how a feature affects predictions.[6][7] and partial dependence plots are calculated after a model has been fit. [partial-plots](https://www.kaggle.com/dansbecker/partial-plots) ``` train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) tree_model = DecisionTreeClassifier(random_state=0, max_depth=5, min_samples_split=5).fit(train_X, train_y) ``` For the sake of explanation, I use a Decision Tree which you can see below. 
``` features = [c for c in train.columns if c not in ['ID_code', 'target']] from sklearn import tree import graphviz tree_graph = tree.export_graphviz(tree_model, out_file=None, feature_names=features) graphviz.Source(tree_graph) ``` As guidance to read the tree: 1. Leaves with children show their splitting criterion on the top 1. The pair of values at the bottom show the count of True values and False values for the target respectively, of data points in that node of the tree. ><font color="red"><b>Note: </b></font> Yes **Var_81** are more effective on our model. <a id="55"></a> <br> ## 5-5 Partial Dependence Plot In this section, we see the impact of the main variables discovered in the previous sections by using the [pdpbox](https://pdpbox.readthedocs.io/en/latest/). ``` from matplotlib import pyplot as plt from pdpbox import pdp, get_dataset, info_plots # Create the data that we will plot pdp_goals = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=features, feature='var_81') # plot it pdp.pdp_plot(pdp_goals, 'var_81') plt.show() ``` <a id="56"></a> <br> ## 5-6 Chart analysis 1. The y axis is interpreted as change in the prediction from what it would be predicted at the baseline or leftmost value. 1. 
A blue shaded area indicates level of confidence ``` # Create the data that we will plot pdp_goals = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=features, feature='var_82') # plot it pdp.pdp_plot(pdp_goals, 'var_82') plt.show() # Create the data that we will plot pdp_goals = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=features, feature='var_139') # plot it pdp.pdp_plot(pdp_goals, 'var_139') plt.show() # Create the data that we will plot pdp_goals = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=features, feature='var_110') # plot it pdp.pdp_plot(pdp_goals, 'var_110') plt.show() ``` <a id="57"></a> <br> ## 5-7 SHAP Values **SHAP** (SHapley Additive exPlanations) is a unified approach to explain the output of **any machine learning model**. SHAP connects game theory with local explanations, uniting several previous methods [1-7] and representing the only possible consistent and locally accurate additive feature attribution method based on expectations (see the SHAP NIPS paper for details). <img src='https://raw.githubusercontent.com/slundberg/shap/master/docs/artwork/shap_diagram.png' width=400 height=400> [image credits](https://github.com/slundberg/shap) ><font color="red"><b>Note: </b></font> Shap can answer to this qeustion : **how the model works for an individual prediction?** ``` row_to_show = 5 data_for_prediction = val_X.iloc[row_to_show] # use 1 row of data here. Could use multiple rows if desired data_for_prediction_array = data_for_prediction.values.reshape(1, -1) rfc_model.predict_proba(data_for_prediction_array); import shap # package used to calculate Shap values # Create object that can calculate shap values explainer = shap.TreeExplainer(rfc_model) # Calculate Shap values shap_values = explainer.shap_values(data_for_prediction) ``` If you look carefully at the code where we created the SHAP values, you'll notice we reference Trees in **shap.TreeExplainer(my_model)**. 
But the SHAP package has explainers for every type of model. 1. shap.DeepExplainer works with Deep Learning models. 1. shap.KernelExplainer works with all models, though it is slower than other Explainers and it offers an approximation rather than exact Shap values. ``` shap.initjs() shap.force_plot(explainer.expected_value[1], shap_values[1], data_for_prediction) ``` <a id="6"></a> <br> # 6- Model Development So far, we have used two models, and at this point we add another model and we'll be expanding it soon. in this section you will see following model: 1. lightgbm 1. RandomForestClassifier 1. DecisionTreeClassifier 1. CatBoostClassifier ## 6-1 lightgbm ``` # params is based on following kernel https://www.kaggle.com/brandenkmurray/nothing-works params = {'objective' : "binary", 'boost':"gbdt", 'metric':"auc", 'boost_from_average':"false", 'num_threads':8, 'learning_rate' : 0.01, 'num_leaves' : 13, 'max_depth':-1, 'tree_learner' : "serial", 'feature_fraction' : 0.05, 'bagging_freq' : 5, 'bagging_fraction' : 0.4, 'min_data_in_leaf' : 80, 'min_sum_hessian_in_leaf' : 10.0, 'verbosity' : 1} %%time y_pred_lgb = np.zeros(len(X_test)) num_round = 1000000 for fold_n, (train_index, valid_index) in enumerate(folds.split(X,y)): print('Fold', fold_n, 'started at', time.ctime()) X_train, X_valid = X.iloc[train_index], X.iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] train_data = lgb.Dataset(X_train, label=y_train) valid_data = lgb.Dataset(X_valid, label=y_valid) lgb_model = lgb.train(params,train_data,num_round,#change 20 to 2000 valid_sets = [train_data, valid_data],verbose_eval=1000,early_stopping_rounds = 3500)##change 10 to 200 y_pred_lgb += lgb_model.predict(X_test, num_iteration=lgb_model.best_iteration)/5 ``` <a id="62"></a> <br> ## 6-2 RandomForestClassifier ``` y_pred_rfc = rfc_model.predict(X_test) ``` <a id="63"></a> <br> ## 6-3 DecisionTreeClassifier ``` y_pred_tree = tree_model.predict(X_test) ``` <a id="64"></a> <br> ## 6-4 
CatBoostClassifier ``` train_pool = Pool(train_X,train_y) cat_model = CatBoostClassifier( iterations=3000,# change 25 to 3000 to get best performance learning_rate=0.03, objective="Logloss", eval_metric='AUC', ) cat_model.fit(train_X,train_y,silent=True) y_pred_cat = cat_model.predict(X_test) ``` Now you can change your model and submit the results of other models. ``` submission_rfc = pd.DataFrame({ "ID_code": test["ID_code"], "target": y_pred_rfc }) submission_rfc.to_csv('submission_rfc.csv', index=False) submission_tree = pd.DataFrame({ "ID_code": test["ID_code"], "target": y_pred_tree }) submission_tree.to_csv('submission_tree.csv', index=False) submission_cat = pd.DataFrame({ "ID_code": test["ID_code"], "target": y_pred_cat }) submission_cat.to_csv('submission_cat.csv', index=False) # good for submit submission_lgb = pd.DataFrame({ "ID_code": test["ID_code"], "target": y_pred_lgb }) submission_lgb.to_csv('submission_lgb.csv', index=False) ``` <a id="65"></a> <br> ## 6-5 Funny Combine ``` submission_rfc_cat = pd.DataFrame({ "ID_code": test["ID_code"], "target": (y_pred_rfc +y_pred_cat)/2 }) submission_rfc_cat.to_csv('submission_rfc_cat.csv', index=False) submission_lgb_cat = pd.DataFrame({ "ID_code": test["ID_code"], "target": (y_pred_lgb +y_pred_cat)/2 }) submission_lgb_cat.to_csv('submission_lgb_cat.csv', index=False) submission_rfc_lgb = pd.DataFrame({ "ID_code": test["ID_code"], "target": (y_pred_rfc +y_pred_lgb)/2 }) submission_rfc_lgb.to_csv('submission_rfc_lgb.csv', index=False) ``` You can follow me on: > ###### [ GitHub](https://github.com/mjbahmani/) > ###### [Kaggle](https://www.kaggle.com/mjbahmani/) <b>I hope you find this kernel helpful and some <font color='red'>UPVOTES</font> would be very much appreciated.</b> <a id="7"></a> <br> # 7- References & credits Thanks to the following kernels that helped me to create this kernel. 1. [https://www.kaggle.com/dansbecker/permutation-importance](https://www.kaggle.com/dansbecker/permutation-importance) 1. 
[https://www.kaggle.com/dansbecker/partial-plots](https://www.kaggle.com/dansbecker/partial-plots) 1. [https://www.kaggle.com/miklgr500/catboost-with-gridsearch-cv](https://www.kaggle.com/miklgr500/catboost-with-gridsearch-cv) 1. [https://www.kaggle.com/dromosys/sctp-working-lgb](https://www.kaggle.com/dromosys/sctp-working-lgb) 1. [https://www.kaggle.com/gpreda/santander-eda-and-prediction](https://www.kaggle.com/gpreda/santander-eda-and-prediction) 1. [https://www.kaggle.com/dansbecker/permutation-importance](https://www.kaggle.com/dansbecker/permutation-importance) 1. [https://www.kaggle.com/dansbecker/partial-plots](https://www.kaggle.com/dansbecker/partial-plots) 1. [https://www.kaggle.com/dansbecker/shap-values](https://www.kaggle.com/dansbecker/shap-values) 1. [https://docs.microsoft.com/en-us/azure/machine-learning/studio/algorithm-choice](https://docs.microsoft.com/en-us/azure/machine-learning/studio/algorithm-choice) 1. [kernel https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65](kernel https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65) 1. [https://www.kaggle.com/brandenkmurray/nothing-works](https://www.kaggle.com/brandenkmurray/nothing-works)
github_jupyter
``` import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pickle simulated = pd.read_csv('../fitness_model/simulation/simulated_freqs.csv', index_col=0) colors = pickle.load(open('../figures/colors.p', 'rb')) sns.set(style='whitegrid', font_scale=1.2) # model_performance = pd.read_csv('../fitness_model/simulation/model_performance_fine.csv', index_col = 0) # model_performance.drop('Unnamed: 0.1', axis=1, inplace=True) # model_performance = model_performance.reset_index() # fit_params = ['DENV1_f0','DENV2_f0','DENV3_f0','beta', 'gamma', 'sigma'] # metrics = ['abs_error', 'accuracy', 'beta', 'delta_sse', 'information_gain', 'pearson_r2', 'spearman_r'] # input_params = { # 'DENV1_f0': 0.7, # 'DENV2_f0': 0.85, # 'DENV3_f0': 0.4, # 'beta': 3.25, # 'gamma': 0.55, # 'sigma': 2.35 # } # def plot_profile_likelihoods(model_performance, metric): # if metric == 'abs_error' or metric == 'rmse': # best_fit = model_performance.ix[model_performance[metric].idxmin()] # else: # best_fit = model_performance.ix[model_performance[metric].idxmax()] # # print ('Best fit (optimizing %s):\n'%metric, best_fit) # fig, axes = plt.subplots(ncols=len(fit_params), nrows=1, figsize=(3*len(fit_params), 3), sharey=True) # for profile_param,ax in zip(fit_params, axes): # fixed_params = [p for p in fit_params if p != profile_param] # plot_data = model_performance # for fp in fixed_params: # plot_data = plot_data.loc[plot_data[fp] == best_fit[fp]] # sns.regplot(profile_param, metric, data=plot_data, fit_reg=False, ax=ax, color='gray') # ax.set_xlabel(profile_param) # ax.set_ylabel(metric) # ax.plot([input_params[profile_param], input_params[profile_param]], # [0,0.2], # linestyle='--', c='gray') # ax.set_ylim(0,0.2) # plt.tight_layout() # # plt.savefig('../profile-likelihoods/%s_%s_perf.png', dpi=300, bbox_inches='tight') # plt.show() # # return best_fit # plot_profile_likelihoods(model_performance, 'rmse') def plot_timecourse(df, ax = 
None, ls='-', ylabel = ''): if not ax: w = 12 h = 4 fig, ax = plt.subplots(1,1,figsize=(w,h)) for clade in df.columns.values: c = colors[clade] ax.plot(df[clade].index.values, df[clade], c=c, linestyle=ls, label=clade) ax.legend(loc=(1,.7)) ax.set_ylabel(ylabel) ax.set_ylim(0,1) ax.set_xlim(1970,2015) ax.set_ylabel('Simulated frequency') # plt.show() fig, ax = plt.subplots(figsize=(8,4)) plot_timecourse(simulated, ax) plt.savefig('./png/simulated_frequencies_high_beta.png') simulated_serotype_parameters = pd.read_csv('../fitness_model/simulation/simulated_fitParameters_freqs.csv', index_col=0) fig, ax = plt.subplots(1,1,figsize=(8,4)) plot_timecourse(simulated_serotype_parameters, ax) plt.savefig('./png/simulated_fitParameters_freqs.png', dpi=300, bbox_inches='tight') ```
github_jupyter
``` %matplotlib inline import pyvisa import numpy as np from pylabnet.utils.logging.logger import LogClient import pylabnet.hardware.spectrum_analyzer.agilent_e4405B as sa from pylabnet.network.client_server.agilent_e4405B import Client import matplotlib.pyplot as plt ``` # Instantiate and Connect Client ``` sa_client = Client( host='localhost', port=12352 ) ``` # Test Spectrum Analyzer Functionality ``` # Turn off display. sa_client.display_off() # Turn on display. sa_client.display_on() ``` ## Background acquisition Let's acquire a background trace which can be used to normalize the power spectrum with respect to the noise floor. ``` # Try to retrieve the background --> Will fail. background = sa_client.get_background() ``` Let's setup the spectrum analyzer to measure the frequency span of 11.7 GHz - 12.3 GHz: ``` # Center frequency at 12 GHz. center_freq = 10 # in GHz sa_client.set_center_frequency(center_freq*1e9) # Set trace span to 1 GHz. freq_span = 1 # in GHz sa_client.set_frequency_span(freq_span*1e9) sa_client.plot_trace() # Now let's acquire this trace 100 times and save the average trace. sa_client.acquire_background_spectrum(num_point=100, nocheck=False) # Now, we can retrieve the background trace, or plot it. background = sa_client.get_background() sa_client.plot_background() ``` ## Trace plotting and background substraction Now that the background spectrum is acquired, we can switch on the signal sources, in this case the output of a single-sideband upconversion setup. ``` # Read and plot trace after swichting on the sources. trace = sa_client.plot_trace() # Plot it with the background spectrum substracted (note the y-axis) trace = sa_client.plot_trace(background_substract=True) ``` We can also use arbitrary IEEE Common Commands defined for the E4405B (see [programming manual](https://literature.cdn.keysight.com/litweb/pdf/E4401-90507.pdf)). ``` # Set to single run mode using a write command. 
command = 'INIT:CONT 0' sa_client.write(command) # Check run mode using a query command. command = 'INIT:CONT?' sa_client.query(command) ``` # Markers If only the peak power for a well resolved peak is to be queried, acquiring the entire trace is not necessary. For this case, the built-in marker functionality can be used. It can be accessed by the helper class `E4405BMarker`. Let's try to define markers tracking the three peaks we can see in the trace, the upper sideband at 12.3 GHz, the lower sideband at 11.7 GHz and the carrier at 12 GHz. Let's start with defining the marker for the upper sideband: ``` # Marker for upper sideband. upp_sb = sa.E4405BMarker( e4405Bclient = sa_client, name = 'Upper Sideband', marker_num = 1 ) ``` This marker is now floating and not assigend to a peak. Let's assign it to the highest peak in the trace, which should correspond to the upper sideband at 12.3 GHz. ``` # Park on maximal peak. upp_sb.set_to_maximum() # Read freq. upp_sb.read_freq() / 1e9 ``` We have successfully parked on the upper sideband at 12.3 GHz. We can read out the power easily: ``` # Read power in dbm. upp_sb.get_power() ``` Let's now add a second marker and park it on the lower sideband at 11.7 GHz. We start we defining the marker and parking it on the maximum peak. ``` # Marker for lower sideband. lower_sb = sa.E4405BMarker( e4405Bclient = sa_client, name = 'Lower Sideband', marker_num = 2 ) # Park on maximal peak. lower_sb.set_to_maximum() lower_sb.read_freq() /1e9 ``` As expected, we are parked on the upper sideband, which corresponds to the highest peak. To switch the peak assignment, we can just use the `look_left` function: ``` # We're still parked at the upper sideband at 12.3 GHz. 
# Let's use the 'look left function' to switch to the next peak left of the current peak position: lower_sb.look_left() lower_sb.read_freq() / 1e9 ``` Now, we are parked at the carrier peak at 12 GHz, thus we need to move another peak to the left: ``` # Now we're at the carrier peak at 12 GHZ. Let's look left one more time: lower_sb.look_left() lower_sb.read_freq() / 1e9 ``` Now we're successfully parked on the lower sideband at 11.7 GHz. We can finally park the third marker at the carrier frequency of 12 GHz by calling `look_left` one time: ``` # marker for carrier carrier = sa.E4405BMarker( e4405Bclient = sa_client, name = 'Carrier', marker_num = 3 ) carrier.set_to_maximum() carrier.look_left() carrier.read_freq() / 1e9 ``` We have successfully parked our three markers: ``` for marker in [lower_sb, carrier, upp_sb]: print(f"Marker '{marker.name}' parked at {marker.read_freq() / 1e9:.2f} GHz reads {marker.get_power():.2f} dbm.") ```
github_jupyter