code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import lib
# ===========================================================
import csv
import pandas as pd
from datascience import *
import numpy as np
import random
import time
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
import collections
import math
import sys
from tqdm import tqdm
from time import sleep

# +
# Initialize useful data
# Load the cleaned ClinVar table, zero-fill missing values, then build a
# balanced, shuffled 10k/10k sample of CLASS 0 / CLASS 1 rows.
df = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)
df = df.fillna(value=0)

df_zero = df.loc[df['CLASS'] == 0].sample(n=10000)
df_one = df.loc[df['CLASS'] == 1].sample(n=10000)
df = pd.concat([df_zero, df_one])
df = df.sample(n=df.shape[0])  # sampling n == len(df) shuffles the rows

all_rows = df.values.tolist()
row_num = len(all_rows)
df.head()


# +
# Decision stump part for Adaboost
# ===========================================================
def is_numeric(value):
    """Return True when *value* is an int or float (numeric attribute)."""
    return isinstance(value, (int, float))


# === LeafNode is the prediction result of this branch ===
class LeafNode:
    def __init__(self, rows):
        # prediction maps label -> count of training rows that reached this leaf
        labels = [row[-1] for row in rows]
        self.prediction = collections.Counter(labels)


# === DecisionNode is an attribute / question used to partition the data ===
class DecisionNode:
    def __init__(self, question=None, left_branch=None, right_branch=None):
        self.question = question
        self.left_branch = left_branch    # rows matching the question
        self.right_branch = right_branch  # rows failing the question


class DecisionStump:
    """Depth-limited decision tree used as the AdaBoost weak learner."""

    def __init__(self, training_attribute, training_data, height, method="CART"):
        self.attribute = training_attribute  # take in attribute names and data separately
        self.train = training_data
        self.height = height
        self.row_num = len(self.train)
        self.column_num = len(self.attribute)
        self.method = method.upper()  # convert to upper case for general use
        self.significance = 0
        if self.method not in ["C4.5", "CART", "HYBRID"]:
            print("Error: Please choose a valid method! from: [C4.5, CART, HYBRID]")
            return None
        # train the stump immediately on construction
        self.root = self.build_stump(self.train, 1)
        # indices of correctly / incorrectly classified training rows;
        # filled in by the boosting loop after the stump is trained
        self.accclassify_idx = []
        self.misclassify_idx = []

    def uniq_val(self, column):
        """Set of distinct values of *column* over the training rows."""
        return set(row[column] for row in self.train)

    class Question:
        """Binary test: `row[col] >= ref` (numeric) or `row[col] == ref` (categorical)."""

        def __init__(self, column, ref_value, attribute):
            self.column = column
            self.ref_value = ref_value if ref_value else "None"
            self.attri = attribute

        def match(self, row):
            if is_numeric(self.ref_value):
                try:
                    return row[self.column] >= self.ref_value
                except Exception:  # mixed-type comparison; report and treat as a match
                    print("Error occured in ", row)
                    return True
            return row[self.column] == self.ref_value

        def __repr__(self):
            operand = ">=" if is_numeric(self.ref_value) else "=="
            return "Is %s %s %s?" % (self.attri[self.column], operand, str(self.ref_value))

    # === Method 1 - C4.5 ===
    def entropy(self, rows):
        """Shannon entropy (bits) of the label distribution of *rows*."""
        labels = [row[-1] for row in rows]
        frequency = collections.Counter(labels).values()
        pop = sum(frequency)
        H = 0
        for f in frequency:
            p = f / pop
            H -= p * math.log(p, 2)
        return H

    # === Method 2 - CART ===
    def gini(self, rows):
        """Gini impurity: probability of misclassifying a random row of *rows*."""
        labels = [row[-1] for row in rows]
        frequency = collections.Counter(labels).values()
        pop = sum(frequency)
        gini = 1
        for f in frequency:
            p = f / pop
            gini -= p ** 2
        return gini

    # === Calculate Gain Info ===
    def info(self, branches, root):
        """Gain ratio (C4.5) or Gini gain (CART) of splitting *root* into *branches*."""
        root_size = float(len(root))
        if self.method == "C4.5":
            gain_info = self.entropy(root)
            split_info = 0
            for branch in branches:
                if not branch:
                    continue
                frac = len(branch) / root_size
                gain_info -= frac * self.entropy(branch)
                split_info -= frac * math.log(frac)
            if split_info == 0:
                # degenerate split: everything fell on one side -> no gain
                # (fixes a ZeroDivisionError in the original)
                return 0
            return gain_info / split_info
        elif self.method == "CART":
            gain_info = self.gini(root)
            for branch in branches:
                if not branch:
                    continue
                gain_info -= len(branch) / root_size * self.gini(branch)
            return gain_info
        elif self.method == "HYBRID":
            pass

    # === Here I only do Binary Partitions ===
    def partition(self, rows, question):
        """Split *rows* into (matching, non-matching) under *question*."""
        true_rows = []
        false_rows = []
        for row in rows:
            if question.match(row):
                true_rows.append(row)
            else:
                false_rows.append(row)
        return true_rows, false_rows

    # the question achieving the max information attenuation is the best question
    def find_best_question(self, rows):
        """Return (best gain, best question) over all columns and reference values."""
        max_info_attenuation = 0
        best_question = self.Question(0, self.train[0][0], self.attribute)
        # minus 1 to avoid using the label column as an attribute
        for col in range(self.column_num - 1):
            for ref_value in self.uniq_val(col):
                if ref_value == "null":
                    continue  # avoid using null values to generate a question
                q = self.Question(col, ref_value, self.attribute)
                temp_true_rows, temp_false_rows = self.partition(rows, q)
                temp_info_attenuation = self.info([temp_true_rows, temp_false_rows], rows)
                if temp_info_attenuation >= max_info_attenuation:
                    max_info_attenuation = temp_info_attenuation
                    best_question = q
        return max_info_attenuation, best_question

    # === Input rows of data with attributes and labels ===
    def build_stump(self, rows, height):
        """Recursively grow the stump until self.height is reached."""
        gain_reduction, q = self.find_best_question(rows)
        true_rows, false_rows = self.partition(rows, q)
        if height + 1 >= self.height:
            return DecisionNode(q, LeafNode(true_rows), LeafNode(false_rows))
        return DecisionNode(q,
                            self.build_stump(true_rows, height + 1),
                            self.build_stump(false_rows, height + 1))

    # === Walk down the tree until we reach a leaf; the label (if present) is ignored ===
    def classify(self, row, node):
        """Return the label Counter of the leaf that *row* falls into."""
        if isinstance(node, LeafNode):
            return node.prediction
        if node.question.match(row):
            return self.classify(row, node.left_branch)
        return self.classify(row, node.right_branch)

    def print_tree(self, node, spacing=""):
        """Pretty-print the (sub)tree rooted at *node*."""
        if isinstance(node, LeafNode):
            print(spacing + "Predict", node.prediction)
            return
        print(spacing + str(node.question))
        print(spacing + '--> True:')
        self.print_tree(node.left_branch, spacing + "  ")
        print(spacing + '--> False:')
        self.print_tree(node.right_branch, spacing + "  ")

    def test(self):
        """Smoke test: build and print one question per column."""
        for i in range(self.column_num):
            q = self.Question(i, self.train[1][i], self.attribute)
            print(q)
            print(q.match(1))


def normalized_weight(weight):
    """Rescale *weight* so it sums to 1 (a valid probability distribution)."""
    return np.divide(weight, sum(weight))


def rev_logit(val):
    """Logistic transform of -val: 1 / (1 + e^val)."""
    return 1 / (1 + np.exp(val))
# -

# +
# Merged Adaboost
# ===========================================================
# Init, Train, Test together
# ===========================================================
training_percentage = 0.2  # fraction of rows used for training
training_size = int(row_num * training_percentage)
testing_size = row_num - training_size
training_attribute = list(df.columns)
testing_data = all_rows[training_size:]  # list rows carry no header

T = 20                  # max number of weak learners per cutoff
weaklearner_height = 4  # depth of each weak learner
ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))
step_size = 0.05
# (pred << 1) + label indexes into this map: 00=TN, 01=FN, 10=FP, 11=TP
CMap = {0: 'TN', 1: 'FN', 2: 'FP', 3: 'TP'}
EPS = 0.00000001  # guard against division by zero

start = time.time()
for cutoff in np.arange(0.55, 1 + step_size, step_size):
    # interactive progress line
    sys.stdout.write('\r')
    sys.stdout.write("Boosting: [%-20s] %d%% cutoff: %.02f"
                     % ('=' * int(cutoff * 100 / 5), int(cutoff * 100), cutoff))
    sys.stdout.flush()

    # Reinit training set
    # ===========================================================
    training_data = all_rows[:training_size]

    # Training for this cutoff
    # ===========================================================
    stump_forest = []
    weight = [1 / training_size] * training_size
    for i in range(T):
        # train a decision stump
        stump = DecisionStump(training_attribute, training_data, weaklearner_height, "CART")
        # mark which training rows this stump gets right / wrong
        for j in range(training_size):
            row = training_data[j]
            pred_counter = stump.classify(row, stump.root)
            pred_label = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + EPS) > cutoff else 0
            if pred_label == row[-1]:
                stump.accclassify_idx.append(j)
            else:
                stump.misclassify_idx.append(j)
        accuracy = len(stump.accclassify_idx) / training_size
        total_err_rate = 1 - accuracy
        # significance of this stump; the 0.0001 terms avoid division by zero
        stump.significance = 0.5 * math.log((1 - total_err_rate + 0.0001) / (total_err_rate + 0.0001))
        stump_forest.append(stump)

        # boost the weight of misclassified rows, damp correct ones, then
        # resample the training set from the updated distribution
        true_scale = np.e ** stump.significance
        for idx in stump.misclassify_idx:
            weight[idx] = weight[idx] * true_scale
        for idx in stump.accclassify_idx:
            weight[idx] = weight[idx] * (1 / true_scale)
        distrib = normalized_weight(weight)
        resampled_idx = np.random.choice(training_size, training_size, p=distrib)
        training_data = [training_data[idx] for idx in resampled_idx]
        # stop early once resampling has collapsed onto too few distinct rows
        if len(set(row[1] for row in training_data)) < 0.04 * training_size:
            break
        # re-init the weight of the training data rows to be even
        weight = [1 / training_size] * training_size

    # Testing for this cutoff
    # ===========================================================
    Confusion = {'TN': 0, 'FN': 0, 'FP': 0, 'TP': 0}
    for row in testing_data:
        true_rate_forest = 0
        for tree_i in stump_forest:
            # prediction is a counter of label 1 and 0
            pred_counter = tree_i.classify(row, tree_i.root)
            # map label {1, 0} to a weighted vote {+1, -1}
            true_rate_tree = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + EPS) > cutoff else -1
            true_rate_forest += true_rate_tree * tree_i.significance
        true_pred = 0 if np.sign(true_rate_forest) <= 0 else 1
        indicator = (true_pred << 1) + row[-1]
        # accordingly update confusion matrix
        Confusion[CMap[indicator]] += 1

    # concatenate the confusion matrix values into the overall ROC Table
    thisline = [cutoff] + list(Confusion.values()) + [(Confusion['TP'] + Confusion['TN']) / sum(Confusion.values())]
    ROC = ROC.with_row(thisline)

end = time.time()
print("\nTime: %.02fs" % (end - start))

ROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))
ROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))
# epsilon guards added for consistency with SENSITIVITY/FPR (the original
# F-measure lambda divided by zero when TP was 0)
ROC = ROC.with_column('FMEAS', ROC.apply(
    lambda TP, FP, FN: 2 * (TP / (TP + FN + 0.00000001)) * (TP / (TP + FP + 0.00000001))
    / (TP / (TP + FN + 0.00000001) + TP / (TP + FP + 0.00000001) + 0.00000001),
    'TP', 'FP', 'FN'))
# -

ROC.show()

# Cached confusion-matrix results from a previous full run, so the plots and
# AUC below can be regenerated without re-running the boosting loop.
ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))
ROC = ROC.with_rows(make_array(
    make_array(0, 8008, 7974, 14, 4, 0.50075),
    make_array(0.05, 1785, 607, 6256, 7352, 0.571063),
    make_array(0.1, 2172, 761, 5869, 7198, 0.585625),
    make_array(0.15, 1604, 343, 6437, 7616, 0.57625),
    make_array(0.2, 3190, 1282, 4851, 6677, 0.616688),
    make_array(0.25, 3937, 1464, 4104, 6495, 0.652),
    make_array(0.3, 5262, 2775, 2779, 5184, 0.652875),
    make_array(0.35, 4705, 2106, 3309, 5880, 0.661563),
    make_array(0.4, 4674, 2398, 3340, 5588, 0.641375),
    make_array(0.45, 4573, 2130, 3441, 5856, 0.651813),
    make_array(0.5, 4939, 2947, 3083, 5031, 0.623125),
    make_array(0.55, 5220, 3501, 2794, 4485, 0.606563),
    make_array(0.6, 5635, 4406, 2379, 3580, 0.575937),
    make_array(0.65, 5015, 2941, 2999, 5045, 0.62875),
    make_array(0.7, 3883, 1891, 4131, 6095, 0.623625),
    make_array(0.75, 6297, 3972, 1717, 4014, 0.644437),
    make_array(0.8, 6672, 5008, 1342, 2978, 0.603125),
    make_array(0.85, 7043, 5570, 971, 2416, 0.591187),
    make_array(0.9, 0, 0, 8014, 7986, 0.499125),
    make_array(0.95, 7857, 7953, 157, 33, 0.493125),
    make_array(1, 7998, 7812, 178, 12, 0.500625),
))
# Re-derive the rate columns on the cached table: the reassignment above
# dropped SENSITIVITY/FPR, which the ROC plot and AUC cells below require.
ROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))
ROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))

# Acc Curve by cutoff
# ===========================================================
fig = plt.figure()
plt.xlabel('Cutoff')
plt.ylabel('Accuracy')
plt.title('Accuracy - Cutoff of Adaboost')
plt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black')
plt.plot(ROC.column('CUTOFF'), ROC.column('ACC'), color='orange')
# legend order must follow plot order: black null line first, then model
plt.legend(['Null', 'Adaboost'])
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Adaboost ACC.png', bbox_inches='tight')

# +
# ROC_CURVE
# ===========================================================
fig = plt.figure()
plt.xlabel('False Positive Rate')
plt.ylabel('Sensitivity')
plt.title('ROC - Curve of Adaboost')
plt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), color='black')
plt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange')
# legend order must follow plot order: black diagonal first, then model
plt.legend(['Null', 'Adaboost'])
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Adaboost ROC.png', bbox_inches='tight')
# -

# Compute AUC (trapezoidal rule over the ROC points)
# ===========================================================
length = len(ROC.column('FPR'))
auc = 0
for i in range(length - 1):
    auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1])
print("auc = %.03f" % auc)

fpr, sen, acc = ROC.column('FPR'), ROC.column('SENSITIVITY'), ROC.column('ACC')
fpr
sen
acc
.ipynb_checkpoints/Adaboost-Copy1-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Case study: Identifying Components in a Company

# ## Load csv files
# We use a sample Call Detail Record
# ##### Calling customers: 199
# ##### Called customers: 400
# ##### Time horizon: Nov-2012 to Jan-2014 (15 months)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import time

h = pd.read_csv('../data/llamadas.csv')

# Inspect the DataFrame
h.info()

# Turn the DataFrame into a weighted directed graph
# (edge attributes: number of calls and call duration in seconds)
G = nx.from_pandas_edgelist(h, source="fromuserid", target="touserid",
                            edge_attr=["numbercalls", "secondscalls"],
                            create_using=nx.DiGraph())
# NOTE(review): nx.info(G) is removed in networkx >= 3.0 — replace with
# print(G) if upgrading.
print(nx.info(G))

# Create a top_nodes function to show the highest values of a dictionary

# #### Connectivity analysis

# Store the degree of each node in a dictionary

nx.is_strongly_connected(G)

nx.is_weakly_connected(G)

nx.number_strongly_connected_components(G)

# +
# list(nx.strongly_connected_components(G))
# -

nx.number_weakly_connected_components(G)

# +
# list(nx.weakly_connected_components(G))
# -

# #### Extracting the largest Strongly Connected Component of the network
SC = max(nx.strongly_connected_components(G), key=len)
print(SC)
CF = G.subgraph(SC)
plt.figure(figsize=(80, 45))
nx.draw_networkx(CF, width=0.1)

# #### Extracting the largest Weakly Connected Component of the network
WC = max(nx.weakly_connected_components(G), key=len)
print(WC)
CD = G.subgraph(WC)
plt.figure(figsize=(80, 45))
# fix: draw the weak component CD (the original redrew CF, a copy-paste bug)
nx.draw_networkx(CD, width=0.1)

# Prepared by <NAME> under the MIT license (2021)
notebooks/Lab_08.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # cTraderFix Jupyter Sample
#
# In this Jupyter notebook we will use the Python package "ctrader-fix" to interact with cTrader FIX API.
#
# Let's start.

# If you haven't already installed the "ctrader-fix" package, run the next code cell to install it via pip:
# (fix: the original cell installed "ctrader-open-api", which is a different
# package from the "ctrader-fix" one imported below)

# !pip install ctrader-fix

# Then we have to import all necessary types:

from twisted.internet import reactor
import json
from ctrader_fix import *
import datetime

# Now we use the "config-trade.json" config file to get your API credentials.
# Be sure to populate it with your API credentials before running next cell:

# you can use two separate config files for QUOTE and TRADE
with open("config-quote.json") as configFile:
    config = json.load(configFile)

# Then we will create a client based on our config:

client = Client(config["Host"], config["Port"], ssl=config["SSL"])


# We will use the below "send" method for sending our messages:

def send(request):
    """Send *request* and log the raw FIX message once transmitted."""
    deferred = client.send(request)
    # FIX fields are delimited by the SOH control character (\x01); swap it
    # for "|" so the logged message is human-readable.
    # NOTE(review): the original source showed .replace("", "|") — the SOH
    # byte was presumably lost in transit; confirm against the FIX spec.
    deferred.addCallback(lambda _: print(
        "\nSent: ",
        request.getMessage(client.getMessageSequenceNumber()).replace("\x01", "|")))


# We are going to:
#
# 1. Send a log on request
# 2. After receiving log on response we send security list request
# 3. Then if host is Trade we create a new market order for second symbol
#    otherwise we send a market data request for the second symbol
#
# Let's set the client call backs that will be used by client after it got
# message received/connected/disconnected:

# +
def onMessageReceived(client, responseMessage):
    """Callback for receiving all messages."""
    print("\nReceived: ", responseMessage.getMessage().replace("\x01", "|"))
    # We get the message type field value
    messageType = responseMessage.getFieldValue(35)
    # we send a security list request after we received logon message response
    if messageType == "A":
        securityListRequest = SecurityListRequest(config)
        securityListRequest.SecurityReqID = "A"
        securityListRequest.SecurityListRequestType = 0
        send(securityListRequest)
    # After receiving the security list we send a market order request by
    # using the security list second symbol
    elif messageType == "y":
        # getFieldValue returns a list here because the symbol ID field (55)
        # is repetitive
        symbolIds = responseMessage.getFieldValue(55)
        if config["TargetSubID"] == "TRADE":
            newOrderSingle = NewOrderSingle(config)
            newOrderSingle.ClOrdID = "B"
            newOrderSingle.Symbol = symbolIds[1]
            newOrderSingle.Side = 1
            newOrderSingle.OrderQty = 1000
            newOrderSingle.OrdType = 1
            newOrderSingle.Designation = "From Jupyter"
            send(newOrderSingle)
        else:
            marketDataRequest = MarketDataRequest(config)
            marketDataRequest.MDReqID = "a"
            marketDataRequest.SubscriptionRequestType = 1
            marketDataRequest.MarketDepth = 1
            marketDataRequest.NoMDEntryTypes = 1
            marketDataRequest.MDEntryType = 0
            marketDataRequest.NoRelatedSym = 1
            marketDataRequest.Symbol = symbolIds[1]
            send(marketDataRequest)
    # after receiving the new order request response we stop the reactor
    # And we will be disconnected from API
    elif messageType == "8" or messageType == "j":
        print("We are done, stopping the reactor")
        reactor.stop()


def disconnected(client, reason):
    """Callback for client disconnection."""
    print("\nDisconnected, reason: ", reason)


def connected(client):
    """Callback for client connection; starts the FIX session with a logon."""
    print("Connected")
    logonRequest = LogonRequest(config)
    send(logonRequest)


# Setting client callbacks
client.setConnectedCallback(connected)
client.setDisconnectedCallback(disconnected)
client.setMessageReceivedCallback(onMessageReceived)
# -

# The last step is to run our client service, it will run inside Twisted
# reactor loop asynchronously:

# Starting the client service
client.startService()
# Run Twisted reactor, we imported it earlier
reactor.run()
samples/jupyter/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import arabicABC as abc from fuzzywuzzy import fuzz # Deleted character `"` at 5735 and 24523 in column `punc`. This caused issue in reading. # Deleted column `punc`. Deleted all numbers. Deleted all empty rows. # Normalized using `afifi.letters.apply(abc.normalize)` # + # afifi = pd.read_csv('AfifiOCR.tsv', sep='\t') # - afifi = pd.read_csv('AfifiCleaned.csv') afifi afifi[afifi.letters.str.contains('\\d',regex=True)] # Normalize the text afifi['letters'] = afifi['letters'].apply(abc.normalize) # What does Afifi's text consist of? letters = set() for word in afifi.letters.tolist(): used = set(word) letters = letters | used alphabeticalLetters = sorted(list(letters)) for a in alphabeticalLetters: print(a) # + # afifi = afifi[afifi.letters.str.len()!=0] # afifi.to_csv('AfifiCleaned.csv',index=False) # - # 14,288 = delete # # + jupyter={"outputs_hidden": true} print('Number of single letter entries: '+str(afifi[afifi.letters.str.len()==1].shape[0])) for i in afifi[afifi.letters.str.len()==1].index: context = afifi.loc[i-2].letters + " " + afifi.loc[i-1].letters + " _" + afifi.loc[i].letters + "_ " + afifi.loc[i+1].letters + " " + afifi.loc[i+2].letters page = str(afifi.loc[i].page) line = str(afifi.loc[i].line) print("row " +str(i) + " page "+ page + " line "+line) print(context + '\n') # - afifi[afifi.letters.str.contains(abc.HAMZA_BELOW)] afifi.letters = afifi.letters.str.replace(abc.HAMZA_BELOW,"") afifi[afifi.letters.str.len()==0]
fusust-text-laboratory/AfifiCleaner.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FitWidget # # FitWidget is a widget to fit curves (1D data) with interactive configuration options, to set constraints, adjust initial estimate parameters... # # # ## Creating a FitWidget # # First load the data. # + # opening qt widgets in a Jupyter notebook # %gui qt # in a regular terminal, run the following 2 lines: # from silx.gui import qt # app = qt.QApplication([]) # + # #%pylab inline import silx.io specfile = silx.io.open("data/31oct98.dat") xdata = specfile["/22.1/measurement/TZ3"] ydata = specfile["/22.1/measurement/If4"] from silx.gui.plot import Plot1D plot=Plot1D() plot.addCurve(x=xdata, y=ydata) plot.show() # - # Then create a FitWidget. # + from silx.gui.fit import FitWidget fw = FitWidget() fw.setData(x=xdata, y=ydata) fw.show() # - # ![FitWidget](fitwidget1.png) # The selection of fit theories and background theories can be done through the interface. Additional configuration parameters can be set in a dialog, by clicking the configure button, to alter the behavior of the estimation function (peak search parameters) or to set global constraints. # # ![FitConfig](fitconfig.png) # # When the configuration is done, click the Estimate button. Now you may change individual constraints or adjust initial estimated parameters. # # You can also add peaks by selecting *Add* in the dropdown list in the *Constraints* column of any parameter, or reduce the number of peaks by selecting *Ignore*. # # When you are happy with the estimated parameters and the constraints, you can click the "Fit" button. At the end of the fit process, you can again adjust the constraints and estimated parameters, and fit again. Only click "Estimate" if you want to reset the estimation and all constraints (this will overwrite all adjustements you have done). 
# ## Open the FitWidget through a PlotWindow # # Rather than instantiating your own FitWidget and loading the data into it, you can just select a curve and click the fit icon inside a PlotWindow or a Plot1D widget. # # A Plot1D always has the fit icon available, but for a PlotWindow you must specify an option `fit=True` when instantiating the widget. # + from silx.gui.plot import PlotWindow pw = PlotWindow(fit=True, control=True) pw.addCurve(x=xdata, y=ydata) pw.show() # - # A FitWidget opened through a PlotWindow is connected to the plot and will display the fit results in the PlotWindow, which is great for comparing the fit against the original data. # ## Exercice # # Write a cubic polynomial function $y= ax^3 + bx^2 + cx + d$ and its corresponding estimation function (use $a=1, b=1, c=1, d=1$ for initial estimated parameters). # # Generate synthetic data. # # Create a FitWidget and add a cubic polynomial function to the dropdown list. Test it on the synthetic data. # # ### Polynomial function # # Tips: # - Read the documentation for the module ``silx.math.fit.fittheory`` to use the correct signature for the polynomial and for the estimation functions. # - Read the documentation for the module ``silx.math.fit.leastsq`` to use the correct format for constraints. Disable all constraints (set them to FREE) # # Links: # - http://pythonhosted.org/silx/modules/math/fit/fittheory.html#silx.math.fit.fittheory.FitTheory.function # - http://pythonhosted.org/silx/modules/math/fit/fittheory.html#silx.math.fit.fittheory.FitTheory.estimate # - http://pythonhosted.org/silx/modules/math/fit/leastsq.html#silx.math.fit.leastsq # # # + # fill-in the blanks def cubic_poly(x, ...): """y = a*x^3 + b*x^2 + c*x +d :param x: numpy array of abscissa data :return: numpy array of y values """ return ... def estimate_cubic_params(...): initial_params = ... constraints = ... 
return initial_params, constraints # - # ### Synthetic data # # Tip: use the `cubic_poly` function # import numpy x = numpy.linspace(0, 100, 250) a, b, c, d = 0.02, -2.51, 76.76, 329.14 y = ... # ### FitWidget with custom function # # Tips: # # - you need to define a customized FitManager to initialize a FitWidget with custom functions # # Doc: # # - http://www.silx.org/doc/silx/dev/modules/math/fit/fitmanager.html#silx.math.fit.fitmanager.FitManager.addtheory # + # %gui qt #from silx.gui import qt from silx.gui.fit import FitWidget from silx.math.fit import FitManager # Uncomment this line if not in a jupyter notebook # a = qt.QApplication([]) ... fitwidget = FitWidget(...) fitwidget.setData(x=x, y=y) fitwidget.show() # -
silx/processing/fit/FitWidget.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ##### Given a linked list and two integers M and N. Traverse the linked
# list such that you retain M nodes then delete next N nodes, continue the
# same until end of the linked list. That is, in the given linked list you
# need to delete N nodes after every M nodes.
#
# Input format :
#   Line 1 : Linked list elements (separated by space and terminated by -1)
#   Line 2 : M
#   Line 3 : N
#
# Sample Input 1 : 1 2 3 4 5 6 7 8 -1 / M=2 / N=2  -> Output: 1 2 5 6
# Sample Input 2 : 1 2 3 4 5 6 7 8 -1 / M=2 / N=3  -> Output: 1 2 6 7

# +
class Node:
    """A singly linked list node."""
    def __init__(self, data):
        self.data = data
        self.next = None


def skipMdeleteN(head, M, N):
    """Keep M nodes, delete the next N, and repeat until the list ends.

    Returns the (unchanged) head, or None when M == 0 or the list is empty.
    Iterative implementation: the original recursed once per M+N chunk and
    hit Python's recursion limit on long lists.
    """
    if head is None:
        return head
    if M == 0:
        # keeping zero nodes per round deletes the entire list
        return None
    current = head
    while current is not None:
        # advance to the last of the M retained nodes
        for _ in range(M - 1):
            if current is None:
                break
            current = current.next
        if current is None:
            break
        # walk past the N nodes to delete, then splice them out
        survivor = current.next
        for _ in range(N):
            if survivor is None:
                break
            survivor = survivor.next
        current.next = survivor
        current = survivor
    return head


def ll(arr):
    """Build a linked list from a Python list; return its head (None if empty)."""
    if len(arr) == 0:
        return None
    head = Node(arr[0])
    last = head
    for data in arr[1:]:
        last.next = Node(data)
        last = last.next
    return head


def printll(head):
    """Print the list values space-separated on a single line."""
    while head:
        print(head.data, end=' ')
        head = head.next
    print()


if __name__ == "__main__":
    # Guarding the stdin-driven script lets the functions above be imported
    # without blocking on input().
    arr = list(int(i) for i in input().strip().split(' '))
    l = ll(arr[:-1])  # drop the -1 terminator
    m = int(input())
    n = int(input())
    l = skipMdeleteN(l, m, n)
    printll(l)
# -
10 Linked List-2/10.11 Delete every N nodes .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + dc={"key": "3"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 1. Import and observe dataset # <p>We all love watching movies! There are some movies we like, some we don't. Most people have a preference for movies of a similar genre. Some of us love watching action movies, while some of us like watching horror. Some of us like watching movies that have ninjas in them, while some of us like watching superheroes.</p> # <p>Movies within a genre often share common base parameters. Consider the following two movies:</p> # <p><img style="margin:5px 20px 5px 1px; height: 250px; display: inline-block;" alt="2001: A Space Odyssey" src="https://assets.datacamp.com/production/project_648/img/movie1.jpg"> # <img style="margin:5px 20px 5px 1px; height: 250px; display: inline-block;" alt="Close Encounters of the Third Kind" src="https://assets.datacamp.com/production/project_648/img/movie2.jpg"></p> # <p>Both movies, <em>2001: A Space Odyssey</em> and <em>Close Encounters of the Third Kind</em>, are movies based on aliens coming to Earth. I've seen both, and they indeed share many similarities. We could conclude that both of these fall into the same genre of movies based on intuition, but that's no fun in a data science context. In this notebook, we will quantify the similarity of movies based on their plot summaries available on IMDb and Wikipedia, then separate them into groups, also known as clusters. 
We'll create a dendrogram to represent how closely the movies are related to each other.</p> # <p>Let's start by importing the dataset and observing the data provided.</p> # + dc={"key": "3"} tags=["sample_code"] # Import modules import numpy as np import pandas as pd import nltk # Set seed for reproducibility np.random.seed(5) # Read in IMDb and Wikipedia movie data (both in same file) movies_df = pd.read_csv('datasets/movies.csv') print("Number of movies loaded: %s " % (len(movies_df))) # Display the data movies_df # + dc={"key": "10"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 2. Combine Wikipedia and IMDb plot summaries # <p>The dataset we imported currently contains two columns titled <code>wiki_plot</code> and <code>imdb_plot</code>. They are the plot found for the movies on Wikipedia and IMDb, respectively. The text in the two columns is similar, however, they are often written in different tones and thus provide context on a movie in a different manner of linguistic expression. Further, sometimes the text in one column may mention a feature of the plot that is not present in the other column. For example, consider the following plot extracts from <em>The Godfather</em>:</p> # <ul> # <li>Wikipedia: "On the day of his only daughter's wedding, <NAME>"</li> # <li>IMDb: "In late summer 1945, guests are gathered for the wedding reception of <NAME>'s daughter Connie"</li> # </ul> # <p>While the Wikipedia plot only mentions it is the day of the daughter's wedding, the IMDb plot also mentions the year of the scene and the name of the daughter. 
</p> # <p>Let's combine both the columns to avoid the overheads in computation associated with extra columns to process.</p> # + dc={"key": "10"} tags=["sample_code"] # Combine wiki_plot and imdb_plot into a single column movies_df['plot'] = movies_df['wiki_plot'].astype(str) + "\n" + \ movies_df['imdb_plot'].astype(str) # Inspect the new DataFrame movies_df.head() # + dc={"key": "17"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 3. Tokenization # <p>Tokenization is the process by which we break down articles into individual sentences or words, as needed. Besides the tokenization method provided by NLTK, we might have to perform additional filtration to remove tokens which are entirely numeric values or punctuation.</p> # <p>While a program may fail to build context from "While waiting at a bus stop in 1981" (<em>Forrest Gump</em>), because this string would not match in any dictionary, it is possible to build context from the words "while", "waiting" or "bus" because they are present in the English dictionary. </p> # <p>Let us perform tokenization on a small extract from <em>The Godfather</em>.</p> # + dc={"key": "17"} tags=["sample_code"] # Tokenize a paragraph into sentences and store in sent_tokenized sent_tokenized = [sent for sent in nltk.sent_tokenize(""" Today (May 19, 2016) is his only daughter's wedding. <NAME> is the Godfather. """)] # Word Tokenize first sentence from sent_tokenized, save as words_tokenized words_tokenized = [word for word in nltk.word_tokenize(sent_tokenized[0])] # Remove tokens that do not contain any letters from words_tokenized import re filtered = [word for word in words_tokenized if re.search('[A-Za-z]', word)] # Display filtered words to observe words after tokenization filtered # + dc={"key": "24"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 4. Stemming # <p>Stemming is the process by which we bring down a word from its different forms to the root word. 
This helps us establish meaning to different forms of the same words without having to deal with each form separately. For example, the words 'fishing', 'fished', and 'fisher' all get stemmed to the word 'fish'.</p> # <p>Consider the following sentences:</p> # <ul> # <li>"Young William Wallace witnesses the treachery of Longshanks" ~ <em>Braveheart</em></li> # <li>"escapes to the city walls only to witness Cicero's death" ~ <em>Gladiator</em></li> # </ul> # <p>Instead of building separate dictionary entries for both witnesses and witness, which mean the same thing outside of quantity, stemming them reduces them to 'wit'.</p> # <p>There are different algorithms available for stemming such as the Porter Stemmer, Snowball Stemmer, etc. We shall use the Snowball Stemmer.</p> # + dc={"key": "24"} tags=["sample_code"] # Import the SnowballStemmer to perform stemming # ... YOUR CODE FOR TASK 4 ... from nltk.stem.snowball import SnowballStemmer # Create an English language SnowballStemmer object stemmer = SnowballStemmer("english") # Print filtered to observe words without stemming print("Without stemming: ", filtered) # Stem the words from filtered and store in stemmed_words stemmed_words = [stemmer.stem(word) for word in filtered] # Print the stemmed_words to observe words after stemming print("After stemming: ", stemmed_words) # + dc={"key": "31"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 5. Club together Tokenize & Stem # <p>We are now able to tokenize and stem sentences. But we may have to use the two functions repeatedly one after the other to handle a large amount of data, hence we can think of wrapping them in a function and passing the text to be tokenized and stemmed as the function argument. Then we can pass the new wrapping function, which shall perform both tokenizing and stemming instead of just tokenizing, as the tokenizer argument while creating the TF-IDF vector of the text. </p> # <p>What difference does it make though? 
# Consider the sentence from the plot of <em>The Godfather</em>: "Today (May 19,
# 2016) is his only daughter's wedding." If we do a 'tokenize-only' for this
# sentence, we have the following result:</p>
# <blockquote>
# <p>'today', 'may', 'is', 'his', 'only', 'daughter', "'s", 'wedding'</p>
# </blockquote>
# <p>But when we do a 'tokenize-and-stem' operation we get:</p>
# <blockquote>
# <p>'today', 'may', 'is', 'his', 'onli', 'daughter', "'s", 'wed'</p>
# </blockquote>
# <p>All the words are in their root form, which will lead to a better
# establishment of meaning as some of the non-root forms may not be present in
# the NLTK training corpus.</p>

# + dc={"key": "31"} tags=["sample_code"]
# Define a function to perform both stemming and tokenization
def tokenize_and_stem(text):
    """Tokenize *text* sentence-by-sentence into words, drop tokens with no
    letters, and stem the remainder.

    Relies on the module-level ``stemmer`` (the SnowballStemmer created in the
    previous cell) and on the ``nltk`` and ``re`` modules imported earlier.
    Returns a list of stemmed word tokens; used as the TfidfVectorizer's
    ``tokenizer`` in section 6.
    """
    # Tokenize by sentence, then by word
    tokens = [word for sentence in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sentence)]

    # Filter out raw tokens to remove noise (keep only tokens containing at
    # least one ASCII letter, so punctuation and pure numbers are dropped)
    filtered_tokens = [token for token in tokens if re.search('[a-zA-Z]', token)]

    # Stem the filtered_tokens
    stems = [stemmer.stem(word) for word in filtered_tokens]

    return stems

# Quick sanity check of the combined tokenize-and-stem helper
words_stemmed = tokenize_and_stem("Today (May 19, 2016) is his only daughter's wedding.")
print(words_stemmed)

# + dc={"key": "38"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 6. Create TfidfVectorizer
# <p>Computers do not <em>understand</em> text. These are machines only capable
# of understanding numbers and performing numerical computation. Hence, we must
# convert our textual plot summaries to numbers for the computer to be able to
# extract meaning from them. One simple method of doing this would be to count
# all the occurrences of each word in the entire vocabulary and return the
# counts in a vector. Enter <code>CountVectorizer</code>.</p>
# <p>Consider the word 'the'. It appears quite frequently in almost all movie
# plots and will have a high count in each case. But obviously, it isn't the
# theme of all the movies!
<a href="https://campus.datacamp.com/courses/natural-language-processing-fundamentals-in-python/simple-topic-identification?ex=11">Term Frequency-Inverse Document Frequency</a> (TF-IDF) is one method which overcomes the shortcomings of <code>CountVectorizer</code>. The Term Frequency of a word is the measure of how often it appears in a document, while the Inverse Document Frequency is the parameter which reduces the importance of a word if it frequently appears in several documents.</p> # <p>For example, when we apply the TF-IDF on the first 3 sentences from the plot of <em>The Wizard of Oz</em>, we are told that the most important word there is 'Toto', the pet dog of the lead character. This is because the movie begins with 'Toto' biting someone due to which the journey of Oz begins!</p> # <p>In simplest terms, TF-IDF recognizes words which are unique and important to any given document. Let's create one for our purposes.</p> # + dc={"key": "38"} tags=["sample_code"] # Import TfidfVectorizer to create TF-IDF vectors from sklearn.feature_extraction.text import TfidfVectorizer # Instantiate TfidfVectorizer object with stopwords and tokenizer # parameters for efficient processing of text tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.2, stop_words='english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3)) # + dc={"key": "45"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 7. Fit transform TfidfVectorizer # <p>Once we create a TF-IDF Vectorizer, we must fit the text to it and then transform the text to produce the corresponding numeric form of the data which the computer will be able to understand and derive meaning from. To do this, we use the <code>fit_transform()</code> method of the <code>TfidfVectorizer</code> object. </p> # <p>If we observe the <code>TfidfVectorizer</code> object we created, we come across a parameter stopwords. 
'stopwords' are those words in a given text which do not contribute considerably towards the meaning of the sentence and are generally grammatical filler words. For example, in the sentence '<NAME> lives with her dog Toto on the farm of her Aunt Em and <NAME>', we could drop the words 'her' and 'the', and still have a similar overall meaning to the sentence. Thus, 'her' and 'the' are stopwords and can be conveniently dropped from the sentence. </p> # <p>On setting the stopwords to 'english', we direct the vectorizer to drop all stopwords from a pre-defined list of English language stopwords present in the nltk module. Another parameter, <code>ngram_range</code>, defines the length of the ngrams to be formed while vectorizing the text.</p> # + dc={"key": "45"} tags=["sample_code"] # Fit and transform the tfidf_vectorizer with the "plot" of each movie # to create a vector representation of the plot summaries tfidf_matrix = tfidf_vectorizer.fit_transform([x for x in movies_df["plot"]]) print(tfidf_matrix.shape) # + dc={"key": "52"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 8. Import KMeans and create clusters # <p>To determine how closely one movie is related to the other by the help of unsupervised learning, we can use clustering techniques. Clustering is the method of grouping together a number of items such that they exhibit similar properties. According to the measure of similarity desired, a given sample of items can have one or more clusters. </p> # <p>A good basis of clustering in our dataset could be the genre of the movies. Say we could have a cluster '0' which holds movies of the 'Drama' genre. We would expect movies like <em>Chinatown</em> or <em>Psycho</em> to belong to this cluster. 
Similarly, the cluster '1' in this project holds movies which belong to the 'Adventure' genre (<em>Lawrence of Arabia</em> and the <em>Raiders of the Lost Ark</em>, for example).</p> # <p>K-means is an algorithm which helps us to implement clustering in Python. The name derives from its method of implementation: the given sample is divided into <b><i>K</i></b> clusters where each cluster is denoted by the <b><i>mean</i></b> of all the items lying in that cluster. </p> # <p>We get the following distribution for the clusters:</p> # <p><img src="https://assets.datacamp.com/production/project_648/img/bar_clusters.png" alt="bar graph of clusters"></p> # + dc={"key": "52"} tags=["sample_code"] # Import k-means to perform clusters from sklearn.cluster import KMeans # Create a KMeans object with 5 clusters and save as km km = KMeans(n_clusters=5) # Fit the k-means object with tfidf_matrix km.fit(tfidf_matrix) clusters = km.labels_.tolist() # Create a column cluster to denote the generated cluster for each movie movies_df["cluster"] = clusters # Display number of films per cluster (clusters from 0 to 4) movies_df['cluster'].value_counts() # + dc={"key": "59"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 9. Calculate similarity distance # <p>Consider the following two sentences from the movie <em>The Wizard of Oz</em>: </p> # <blockquote> # <p>"they find in the Emerald City"</p> # <p>"they finally reach the Emerald City"</p> # </blockquote> # <p>If we put the above sentences in a <code>CountVectorizer</code>, the vocabulary produced would be "they, find, in, the, Emerald, City, finally, reach" and the vectors for each sentence would be as follows: </p> # <blockquote> # <p>1, 1, 1, 1, 1, 1, 0, 0</p> # <p>1, 0, 0, 1, 1, 1, 1, 1</p> # </blockquote> # <p>When we calculate the cosine angle formed between the vectors represented by the above, we get a score of 0.667. This means the above sentences are very closely related. 
<em>Similarity distance</em> is 1 - <a href="https://en.wikipedia.org/wiki/Cosine_similarity">cosine similarity angle</a>. This follows from the fact that if the vectors are similar, the cosine of their angle would be 1 and hence, the distance between them would be 1 - 1 = 0.</p> # <p>Let's calculate the similarity distance for all of our movies.</p> # + dc={"key": "59"} tags=["sample_code"] # Import cosine_similarity to calculate similarity of movie plots from sklearn.metrics.pairwise import cosine_similarity # Calculate the similarity distance similarity_distance = 1 - cosine_similarity(tfidf_matrix) # + dc={"key": "66"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 10. Import Matplotlib, Linkage, and Dendrograms # <p>We shall now create a tree-like diagram (called a dendrogram) of the movie titles to help us understand the level of similarity between them visually. Dendrograms help visualize the results of hierarchical clustering, which is an alternative to k-means clustering. Two pairs of movies at the same level of hierarchical clustering are expected to have similar strength of similarity between the corresponding pairs of movies. For example, the movie <em>Fargo</em> would be as similar to <em>North By Northwest</em> as the movie <em>Platoon</em> is to <em>Saving Private Ryan</em>, given both the pairs exhibit the same level of the hierarchy.</p> # <p>Let's import the modules we'll need to create our dendrogram.</p> # + dc={"key": "66"} tags=["sample_code"] # Import matplotlib.pyplot for plotting graphs # ... YOUR CODE FOR TASK 10 ... import matplotlib.pyplot as plt # Configure matplotlib to display the output inline # %matplotlib inline # Import modules necessary to plot dendrogram # ... YOUR CODE FOR TASK 10 ... from scipy.cluster.hierarchy import linkage, dendrogram # + dc={"key": "73"} deletable=false editable=false run_control={"frozen": true} tags=["context"] # ## 11. 
# Create merging and plot dendrogram
# <p>We shall plot a dendrogram of the movies whose similarity measure will be
# given by the similarity distance we previously calculated. The lower the
# similarity distance between any two movies, the lower their linkage will make
# an intercept on the y-axis. For instance, the lowest dendrogram linkage we
# shall discover will be between the movies, <em>It's a Wonderful Life</em> and
# <em>A Place in the Sun</em>. This indicates that the movies are very similar
# to each other in their plots.</p>

# + dc={"key": "73"} tags=["sample_code"]
# Create mergings matrix
# Complete-linkage hierarchical clustering over the similarity-distance
# matrix computed in section 9.
mergings = linkage(similarity_distance, method='complete')

# Plot the dendrogram, using title as label column
dendrogram_ = dendrogram(mergings,
                         labels=[x for x in movies_df["title"]],
                         leaf_rotation=90,
                         leaf_font_size=16,
                         )

# Adjust the plot
fig = plt.gcf()
# Color every x-axis (movie title) tick label red for readability
_ = [lbl.set_color('r') for lbl in plt.gca().get_xmajorticklabels()]
# Very wide figure so all movie-title labels stay legible
fig.set_size_inches(108, 21)

# Show the plotted dendrogram
plt.show()

# + dc={"key": "80"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 12. Which movies are most similar?
# <p>We can now determine the similarity between movies based on their plots!
# To wrap up, let's answer one final question: which movie is most similar to
# the movie <em>Braveheart</em>?</p>

# + dc={"key": "80"} tags=["sample_code"]
# Answer the question (read off the dendrogram: Braveheart's nearest neighbour)
ans = "Gladiator"
print(ans)
Find Movie Similarity from Plot Summaries/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/uwesterr/MlFastAiBlog/blob/master/_notebooks/2020-03-01-TrainDonkeyCar.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="BlmQIFSLZDdc" # # Train donkeycar with GPU support in Colab # > Use Colab to train a neural network for a donkey car # # - toc: true # - badges: true # - categories: [donkeycar, Colab] # - image: images/roboCar2020.jpg # - author: <NAME> # - comments: true # + [markdown] colab_type="text" id="Fg2cbgwvj86i" # # Credit # > Note: This notebook is based on https://colab.research.google.com/github/robocarstore/donkey-car-training-on-google-colab/blob/master/Donkey_Car_Training_using_Google_Colab.ipynb # + [markdown] id="ypOifoo14c4u" colab_type="text" # Get necessary libraries # + id="QlNn-cK93bj4" colab_type="code" colab={} #collapse-hide # %tensorflow_version 1.13.1 import tensorflow from google.colab import drive from google.colab import files from IPython.display import Image import glob import shutil # + [markdown] colab_type="text" id="arsH-DhLcihq" # # Step 1: Create environment # To train a neural network for the donkeycar we need a few components # - install donkeycar # - upload data via # - direct upload # - mount Google drive # # # + [markdown] colab_type="text" id="VBVjteqHtvg-" # > Note: Donkeycar at the time of writing in March 2020 uses Tensorflow 1.13, therefore version 1.xx is installed # + colab_type="code" id="uQgEhuoTcg0N" colab={} #collapse-show print(tensorflow.__version__) # + [markdown] colab_type="text" id="Ba2oPDIrsDFg" # ## Git Clone the donkeycar repository # + [markdown] colab_type="text" 
id="kGlk1n7OtvhE" # Get the latest donkeycar version from GitHub # > Note: The default branch is "dev", however, the documentation is for the master branch. # + colab_type="code" id="oOxd9PFUyNxI" outputId="cbe7e9cf-6715-4d0e-dc23-501a858ffaec" colab={"base_uri": "https://localhost:8080/", "height": 153} # !git clone https://github.com/autorope/donkeycar.git # %cd /content/donkeycar # !git checkout master # + [markdown] colab_type="text" id="9TkkcF-gsAnx" # ## Install donkey car # Different to the description at http://docs.donkeycar.com/guide/host_pc/setup_ubuntu/ we create no anaconda environment since the script is supposed to run on Colab which will delete the instance anyway once you disconnect the notebook. # + colab_type="code" id="jz_PZgrByPDh" outputId="a0972097-ad1e-46ca-c28a-7e16c9ec2c62" colab={"base_uri": "https://localhost:8080/", "height": 612} # !pip3 install -e .[pc] # + [markdown] colab_type="text" id="syCctLq2r4Wk" # ## Create Project # In this step the following actions take place # - create necessary folders (models, data, logs) # - copying necessary files into folders (manage.py, myconfig.py etc.) 
# # + colab_type="code" id="1xjJBSITyXy2" outputId="0fdea0a3-ecf6-4598-ac4b-4fa48572f426" colab={"base_uri": "https://localhost:8080/", "height": 221} # !donkey createcar --path /content/mycar # + [markdown] colab_type="text" id="SCf6uTHnO4Lh" # # Step 2: Supply Data # In order to train the neural network we need to supply trainings data which are recorded on the raspi during driving the donkeycar on the track # # # + [markdown] colab_type="text" id="dnUy1Z1zro77" # ## Step 2 opt A: Supply own data # If you have own data proceed here, if you want to use data which were made available via GitHub please continue to (link only works in blog not in Colab) [section](#GitHubData) **Supply GitHub hosted data** # # # + [markdown] colab_type="text" id="q4B2qbCc4Iaq" # ### Zip data on raspi # Copy the following code and run on raspi # > Note: Copying of the data is much faster if the data is zipped to one file. # # ```bash # # # cd ~/mycar/data # # either compress just one folder # tar -czf tub_xx_yyyy_mm_dd.tar.gz tub_xx_yyyy_mm_dd # # or all folders starting with "tub" # tar -czf trainingsData2020_03-01.tar.gz tub* # # # ``` # # This will create a tub_xx_yyyy_mm_dd.tar.gz file under ~/mycar/data # # ### Copy the zipped tub to your local PC # # Run this on your local pc if you are using linux/mac # ``` # sftp <EMAIL> # # # cd ~/mycar/data # get tub_xx_yyyy_mm_dd.tar.gz # ``` # # If you are on a windows, download sftp utility like [filezilla](https://filezilla-project.org/) or [putty](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) # + [markdown] colab_type="text" id="wlwZuLKA56nt" # Define your tub name here # + colab_type="code" id="0ShFSsaewLCT" colab={} tub_name="tubVaihingenIIICleaned200126" # + [markdown] colab_type="text" id="W47gmXA0O4Lo" # ### Upload the tub from Google Drive # # First upload the tub_x_yyyy_mm_dd.tar.gz to Google Drive. We will then mount Google Drive from colab and copy the data from Drive directly. 
# > Note: To copy data from Google Drive to Colab is faster than uploading it from local machine. # # When you run the cell below, you will need to click the link and generate an authorization code to for colab to access your drive. # + colab_type="code" id="Bgp_wtENw_4n" outputId="6278f56c-a65e-460b-becd-84596f5f1ead" colab={"base_uri": "https://localhost:8080/", "height": 122} drive.mount('/content/drive') # + [markdown] colab_type="text" id="IfSDpXj9x16v" # Suppose you upload the tub_xx_yyyy_mm_dd.tar.gz to Google Drive/mycar/tub_xx_yyyy_mm_dd.tar.gz, this is how you copy it from Google Drive to colab # + colab_type="code" id="BIzWTrV-xwkJ" outputId="973f7aa3-c7a7-42b7-b4b0-ee5fdd6bf669" colab={"base_uri": "https://localhost:8080/", "height": 51} # %cd /content/mycar # !rm -rf data # !mkdir data # %cd /content/mycar/data # !cp /content/drive/My\ Drive/myCar/{tub_name}.tar.gz . # + [markdown] colab_type="text" id="QtwcEli6yFrw" # And untar it to the right place # + colab_type="code" id="urhFWBkUyGf0" outputId="552a7270-2f73-42a8-ed75-527aaaab985c" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd /content/mycar/data # !tar -xzf {tub_name}.tar.gz # + [markdown] colab_type="text" id="4CchMtMI6gfd" # Lets look at one image to see we got valid data # + colab_type="code" id="KsItvBTkzWcH" outputId="fca0d21a-4b31-430f-8d5f-5460840955a6" colab={"base_uri": "https://localhost:8080/", "height": 154} # %cd /content/mycar/data/tubVaihingenIIICleaned200126/ file = glob.glob("*.jpg") Image(file[100]) # + [markdown] colab_type="text" id="AXfVTJT6w7b2" # ### Check quality of data # You want data which has left and right turns preferably in equal shares. A histogram is a good tool to check if this is the case. You can use a donkeycar tool for that # # # ``` # donkey tubhist <tub_path> --rec=<"user/angle"> # ``` # The histogram shows that mainly the car drove straight ahead and more left turns than right turns. 
It is good practice to drive a course clock wise and anti clock wise to avoid this imbalance. # # + colab_type="code" id="OPPFtaxwx8gk" outputId="b37332c4-a40a-41fe-9e27-d81f86f98bce" colab={"base_uri": "https://localhost:8080/", "height": 599} # %cd /content/mycar # !donkey tubhist --tub data/{tub_name} --rec="user/angle" file = glob.glob("tubVaihingenIIICleaned200126_hist_user_angle.png") Image(file[0]) # + [markdown] colab_type="text" id="i4hNVVvFByr4" # Next step is to train your model in section [Upload local files](#uplaod_local_files) (Link only works in blog not in Colab) # + [markdown] colab_type="text" id="3Acwhbrv9ZfQ" # ## Supply GitHub hosted data # <a id='GitHubData'></a> # If you don't have own training data you might want to use an example data set # # > Note: The training data is cleaned ([tubclean](https://docs.donkeycar.com/utility/donkey/#clean-data-in-tub)) but whether or not you get a good working model out of it... # # See instructions below how to get the data into the Colab environment # # # # The training data are **hosted on GitHub** # - clone GitHub repo # - move file to data folder # - unzip file # + [markdown] colab_type="text" id="JhS9u7_B7ZHP" # ## Step 2 opt B: Use data from RoboCarEsslingen # The first data set we use is from [RoboCar Esslingen GitHub # ](https://github.com/RoboCarEsslingen) which is operated by the meetup group [Esslinger Makerspace Projekt: Autonomen RoboCar bauen](https://www.meetup.com/Esslingen-Makerspace/) located in Esslingen, Germany. # The data was recorded at [#8.7 Quarterly Hack: DIYrobocars Build & Race](https://www.meetup.com/Connected-Autonomous-Driving/events/259840582/) in Suttgart with kind support of Bosch. 
# If you want to use data from **Connected Atonomous Mobility** proceed in chapter (link only works in blog not in Colab) [section](#CamData) **Use data from Connected Autonomous Mobility** # + colab_type="code" id="r29a5dD19y61" colab={} # %cd /content/mycar # !rm -rf data # !mkdir data # %cd /content/mycar/data # !pwd # ! git clone --recursive https://github.com/RoboCarEsslingen/trainingData.git # + [markdown] colab_type="text" id="PPOnDZUJLp9f" # Move zip file to data folder and unzip # # + colab_type="code" id="UzP0MiBrK0wv" colab={} shutil.move("/content/mycar/data/trainingData/tubVaihingenIIICleaned200126.tar.gz", "/content/mycar/data") # + [markdown] colab_type="text" id="v489n2oANHIw" # Unzip the training data file # # + colab_type="code" id="r7clEZSZNGUd" colab={} # %cd /content/mycar/data # !tar -xzf tubVaihingenIIICleaned200126.tar.gz # + [markdown] colab_type="text" id="m8hntbOb8PZH" # #### Check quality of data # You want data which has left and right turns preferably in equal shares. A histogram is a good tool to check if this is the case. You can use a donkeycar tool for that # # # ``` # donkey tubhist <tub_path> --rec=<"user/angle"> # ``` # The histogram shows that mainly the car drove straight ahead and more left turns than right turns. It is good practice to drive a course clock wise and anti clock wise to avoid this imbalance. 
# # + colab_type="code" id="cnGVCR5_8PZI" colab={} # %cd /content/mycar # !donkey tubhist --tub data/{tub_name} --rec="user/angle" file = glob.glob("tubVaihingenIIICleaned200126_hist_user_angle.png") Image(file[0]) # + [markdown] colab_type="text" id="9f9n1KNr8l_y" # ## Use data from Connected Autonomous Mobility # <a id='CamData'></a> # Another data set is from [Connected Autonomous Mobility # ](https://github.com/connected-autonomous-mobility/20-data) # We clone the whole repo because I don't know how to download a single file from a GitHub repo, **if you know how to this than please leave a note the comment section.** # + colab_type="code" id="huBYp8Tu8l_0" colab={} # %cd /content/mycar # !rm -rf data # !mkdir data # %cd /content/mycar/data # !pwd # ! git clone --recursive https://github.com/connected-autonomous-mobility/20-data # + [markdown] colab_type="text" id="4mVN5ljz8l_3" # Move zip file to data folder and unzip # # + colab_type="code" id="FxDbItFT8l_3" colab={} import shutil shutil.move("/content/mycar/data/20-data/20190414-BOSCH-Solaris-Course/tub_36_19-04-13.zip", "/content/mycar/data") # + [markdown] colab_type="text" id="rJo3_Pk68l_5" # Unzip the training data file # # + colab_type="code" id="d7QyaWuZ8l_6" colab={} # %cd /content/mycar/data # !unzip tub_36_19-04-13.zip # + [markdown] id="TaRgu0Mb5J-G" colab_type="text" # Lets look at one image to get an impression what the car saw. # # ``` # # This is formatted as code # ``` # # # + id="Jg-t_CPP5JJC" colab_type="code" colab={} # %cd /content/mycar/data/tub_36_19-04-13/ file = glob.glob("*.jpg") Image(file[300]) # + [markdown] colab_type="text" id="hWgKMCa88l_8" # #### Check quality of data # You want data which has left and right turns preferably in equal shares. A histogram is a good tool to check if this is the case. 
You can use a donkeycar tool for that # # # ``` # donkey tubhist <tub_path> --rec=<"user/angle"> # ``` # The histogram shows that mainly the car drove straight ahead, left and right turns are pretty well balanced # # + colab_type="code" id="t0rx23db8l_8" colab={} tub_name="tub_36_19-04-13" # %cd /content/mycar # !donkey tubhist --tub data/{tub_name} --rec="user/angle" file = glob.glob("tub_36_19-04-13_hist_user_angle.png") Image(file[0]) # + [markdown] colab_type="text" id="lIdS-AyIxn6e" # # Step 3: Upload myconfig.py # <a id='uplaod_local_files'></a> # You can upload files from local machine as well, but probably is slower than above approach downloading files from Google Drive # + colab_type="code" id="gsHJGimwxv4F" colab={} # uploaded = files.upload() # + [markdown] colab_type="text" id="j4ABM1_VSgOF" # # # #### Get myconfig.py # The file `myconfig.py` has to be the identical during training and driving, therefore it makes sense to upload the `myconfig.py` which you are using on the car. # # > Note: In `myconfig.py` there are parameters which control the training such as: # # # ``` # # line parameter --type to the python manage.py train and drive commands. # # DEFAULT_MODEL_TYPE = 'linear' (linear|categorical|rnn|imu|behavior|3d|localizer|latent) # # BATCH_SIZE = 128 how many records to use when doing one pass of gradient decent. Use a smaller number if your gpu is running out of memory. # # TRAIN_TEST_SPLIT = 0.8 what percent of records to use for training. the remaining used for validation. # # MAX_EPOCHS = 100 how many times to visit all records of your data # # SHOW_PLOT = True would you like to see a pop up display of final loss? # # VEBOSE_TRAIN = True would you like to see a progress bar with text during training? # # USE_EARLY_STOP = True would you like to stop the training if we see it's not improving fit? 
# # EARLY_STOP_PATIENCE = 5 how many epochs to wait before no improvement # # MIN_DELTA = .0005 early stop will want this much loss change before calling it improved. # # PRINT_MODEL_SUMMARY = True print layers and weights to stdout # # OPTIMIZER = None adam, sgd, rmsprop, etc.. None accepts default # # LEARNING_RATE = 0.001 only used when OPTIMIZER specified # # LEARNING_RATE_DECAY = 0.0 only used when OPTIMIZER specified # # SEND_BEST_MODEL_TO_PI = False change to true to automatically send best model during training # # CACHE_IMAGES = True keep images in memory. will speed successive epochs, but crater if not enough mem. # # # # PRUNE_CNN = False This will remove weights from your model. The primary goal is to increase performance. # # PRUNE_PERCENT_TARGET = 75 The desired percentage of pruning. # # PRUNE_PERCENT_PER_ITERATION = 20 Percentage of pruning that is perform per iteration. # # PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 The max amount of validation loss that is permitted during pruning. # # PRUNE_EVAL_PERCENT_OF_DATASET = .05 percent of dataset used to perform evaluation of model. # RNN or 3D # SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many. # # # # Region of interest cropping # # # only supported in Categorical and Linear models. # ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image # ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image # ``` # # # # # # + colab_type="code" id="qO5y5QlHSdWB" outputId="726d4818-ac02-48b3-98b1-904080801776" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd /content/mycar # !cp /content/drive/My\ Drive/myCar/myconfig.py . 
# + [markdown] colab_type="text" id="A1Y_btMs6FyH" # # Step 4: Train your model # <a id='train_model'></a> # There are several types of modes available: # # - linear # - categorical # - rnn # - imu # - behavior # - 3d # - localizer # And you can use pre-trained models by adding a flag # - `[--transfer=<model>]` # + [markdown] colab_type="text" id="ESiFEnkQ6WXp" # ## Step 4 opt A: Transfer learning using pre-trained model # > Note: You can use a pre-trained model and use transfer learning # + [markdown] colab_type="text" id="nRQ2eq6H84vl" # Do not forget to set the variables in myconfig.py # # # ``` # FREEZE_LAYERS = True # `#default False will allow all layers to be modified by training # # NUM_LAST_LAYERS_TO_TRAIN = 7 # `#when freezing layers, how many layers from the last should be allowed to train? # ``` # # # # # + [markdown] colab_type="text" id="yCs1rze4WgPo" # #### Upload pre-trained model # Upload model in case you want to use a pre-trained model for transfer learning. # To define which layers shall be trained and which shall be frozen set the parameters in `myconfig.py`` # # Model transfer options # # When copying weights during a model transfer operation, should we freeze a certain number of layers to the incoming weights and not allow them to change during training? # # ``` # FREEZE_LAYERS = False #default False will allow all layers to be modified by training # NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train? # # ``` # # # + colab_type="code" outputId="6f6a235b-8744-4cf1-f680-62a76722a73d" id="7s5wQxd_WgPq" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd /content/mycar/models # !cp /content/drive/My\ Drive/myCar/base_linear.h5 . 
# + [markdown] id="sTCpODL8ZP6y" colab_type="text" # Plot the model structure # + id="4yZqtHsiZOKN" colab_type="code" outputId="922904f9-c908-4f3a-cb5f-45ea249c914f" colab={"base_uri": "https://localhost:8080/", "height": 1000} from tensorflow.keras.utils import plot_model from tensorflow.keras.models import load_model # %cd /content/mycar/models model = load_model('base_linear.h5') plot_model( model, to_file="model.png", show_shapes=False, show_layer_names=True, rankdir="TB", expand_nested=False, dpi=96, ) # + [markdown] id="oyUWgx4wW7Hw" colab_type="text" # ##### Start transfer learning of pre-trained model # Use the `manage.py` script to start training # + colab_type="code" id="0YHhDGJYvZ5u" outputId="f143891c-63fc-4af7-dfd9-7be13c04a4c6" colab={"base_uri": "https://localhost:8080/", "height": 1000} # !python /content/mycar/manage.py train --type=linear --transfer=/content/mycar/models/base_linear.h5 --model=/content/mycar/models/mypilot.h5 # + [markdown] colab_type="text" id="T3Ya8qEUAfOv" # ## Step 4 opt B: Train RNN model # The RNN model combines several images to calculate steering and throttle. # Use the `manage.py` script to start training # + colab_type="code" id="edH3xO_AVWXu" colab={} # !python /content/mycar/manage.py train --type rnn --model /content/mycar/models/mypilot.h5 --aug # + [markdown] id="LF7RJ3e1Mkiw" colab_type="text" # # Step 5: Check model and transfer data # To check the quality of the model we look at the loss curve and see how well commanded and predicted steering and throttle values match. # We transfer the data to the car and show how to start the self driving car. # # + [markdown] colab_type="text" id="rXzn1noJz5MQ" # ## Plot loss curve of model # The curve should show smaller loss vs epochs and the train and validation loss should not differ too much. # > Tip: If train loss is much smaller than validation loss your model might be overfitting. 
# # # + colab_type="code" id="AixQrFy_z3vv" outputId="df45edc7-31a0-4799-a0dd-a77cc4b01c27" colab={"base_uri": "https://localhost:8080/", "height": 514} # %cd /content/mycar/models file = glob.glob("*.png") Image(file[0]) # + [markdown] colab_type="text" id="ra7Hp3edyCZm" # ## Plot commands and predictions # You can use # # # ``` # donkey tubplot <tub_path> [--model=<model_path>] # ``` # # to plot the commands and predictions of steering and throttle # # # + colab_type="code" id="eqEy49rqyZ4T" outputId="27f2be9f-1ee8-4690-9b42-b0bb38e59227" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %cd /content/mycar # !donkey tubplot --tub=data/ --tub=data/tubVaihingenIIICleaned200126 --model=models/mypilot.h5 file = glob.glob("/content/mycar/models/mypilot.h5_pred.png") Image(file[0]) # + [markdown] colab_type="text" id="6BEOJYH601O0" # ## Copy the trained model back to Donkey Car (Pi) # # Once the training is complete on Colab, download # # # * mypilot.h5 file from /content/mycar/models/ # * myconfig.py file from /content/mycar/ # # --- # # # # + colab_type="code" id="YtvyJpOdocjb" colab={} files.download('./mypilot.h5') # %cd /content/mycar files.download('myconfig.py') # + [markdown] colab_type="text" id="i7AIY6yBOCM-" # Alternatively, you can copy the model back to Google Drive too # + colab_type="code" id="7Dim4fCpOBo9" colab={} # !cp /content/mycar/models/mypilot.h5 /content/drive/My\ Drive/myCar/mypilot.h5 # + [markdown] colab_type="text" id="GpkOVzh86omO" # ## Copy the file from your PC or Mac to the Raspberry Pi using Filezilla or scp command. 
# # ```
# sftp <EMAIL>
#
#
# cd mycar/models
# put mypilot.h5
# ```

# + [markdown] colab_type="text" id="hfERkGy821Xy"
# ## Start Autopilot on Pi
#
#
# ```bash
#
#
# cd ~/mycar
# python manage.py drive --model models/mypilot.h5 --js
# ```
#

# + [markdown] colab_type="text" id="X93SodzAv9hV"
# # Step 6: Bonus - Salient Object Visualization
# The salient visualization gives an indication of which parts of the image caused the highest activations in the model. It's a good method to understand what triggers the steering and to identify problems such as
# - reflections
# - distractions off the track
#
# > Note: It seems like the salient mode doesn't work for RNN networks

# + colab_type="code" id="4AZvWSeiyqto" colab={}
# # !pip install git+https://github.com/autorope/keras-vis.git
# !pip uninstall keras-vis
# !pip install git+https://github.com/sctse999/keras-vis

# + colab_type="code" id="kKI37gVrv9Q8" colab={}
# %cd /content/mycar
# !donkey makemovie --tub data/{tub_name} --model models/mypilot.h5 --type linear --salient

# + [markdown] colab_type="text" id="h7Rh-PlQFCkR"
# Download the movie to the local machine

# + colab_type="code" id="IcUrgOq_pePV" colab={}
# %cd /content/mycar
# !ls -ahl
files.download('tub_movie.mp4')

# + [markdown] colab_type="text" id="H9naBRC-GZ82"
# Or download the file to Google Drive

# + colab_type="code" id="QwXuQ7vhGh6l" colab={}
# !cp /content/mycar/tub_movie.mp4 /content/drive/My\ Drive/myCar/tub_movie.mp4
_notebooks/2020-03-01-TrainDonkeyCar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cifkao/confugue/blob/master/docs/pytorch_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="oxoG-AkYrgF_" colab_type="text" # # Confugue # # - Installation: `pip install confugue` # - Docs: [confugue.readthedocs.io](https://confugue.readthedocs.io/) # - Code: [github.com/cifkao/confugue](https://github.com/cifkao/confugue) # # Confugue is a **hierarchical configuration framework** for Python. It provides a wrapper class for **nested configuration dictionaries** (usually loaded from YAML files), which can be used to easily configure complicated object hierarchies. # # This notebook is intended as a quick start guide for **deep learning** users. It uses PyTorch for example purposes, but it should be easy to follow even for people working with other frameworks like TensorFlow. It should also be stressed that Confugue is in no way limited to deep learning applications, and a [getting started guide](https://confugue.readthedocs.io/en/latest/general-guide.html) for general Python users is available. # + id="ssgW214GvgwE" colab_type="code" cellView="both" colab={} # !pip install confugue # + [markdown] id="Bp-tFUjQvTWr" colab_type="text" # ## Basic PyTorch example # We are going to start with a basic PyTorch model, adapted from the [CIFAR-10 tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html). First, let's see how we would code the model *without* using Confugue. 
# + id="NFPP6AlRwY0J" colab_type="code" colab={} import torch from torch import nn # + id="jQz5pD3Rwc-T" colab_type="code" colab={} class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.conv2 = nn.Conv2d(6, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(400, 120) self.fc2 = nn.Linear(120, 10) self.act = nn.ReLU() def forward(self, x): x = self.pool(self.act(self.conv1(x))) x = self.pool(self.act(self.conv2(x))) x = x.flatten(start_dim=1) x = self.act(self.fc1(x)) x = self.fc2(x) return x # + [markdown] id="5WhqzPywxnQU" colab_type="text" # ### Making it configurable # Instead of hard-coding all the hyperparameters like above, we want to be able to specify them in a configuration file. To do so, we are going to decorate our class with the `@configurable` decorator. This provides it with a magic `_cfg` property, giving it access to the configuration. We can then rewrite our `__init__` as follows: # + id="kOjVsNBSxsWh" colab_type="code" colab={} from confugue import configurable, Configuration # + id="cDbO0TQBxyu8" colab_type="code" colab={} @configurable class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = self._cfg['conv1'].configure(nn.Conv2d, in_channels=3) self.conv2 = self._cfg['conv2'].configure(nn.Conv2d) self.pool = self._cfg['pool'].configure(nn.MaxPool2d) self.fc1 = self._cfg['fc1'].configure(nn.Linear) self.fc2 = self._cfg['fc2'].configure(nn.Linear, out_features=10) self.act = self._cfg['act'].configure(nn.ReLU) def forward(self, x): x = self.pool(self.act(self.conv1(x))) x = self.pool(self.act(self.conv2(x))) x = x.flatten(start_dim=1) x = self.act(self.fc1(x)) x = self.fc2(x) return x # + [markdown] id="vR3t4og6ninS" colab_type="text" # Instead of creating each layer directly, we configure it with values from the corresponding section of the configuration file (which we will see in a moment). Notice that we can still specify arguments in the code (e.g. 
`in_channels=3` for the `conv1` layer), but these are treated as defaults and can be overridden in the configuration file if needed. # + [markdown] id="2OGCYOEK3Dz5" colab_type="text" # ### Loading configuration from a YAML file # Calling `Net()` directly would result in an error, since we haven't specified defaults for all the required parameters of each layer. # We therefore need to create a configuration file to supply them: # + id="l5HJiqwvyruu" colab_type="code" outputId="5346a39f-f78a-4493-9510-c1b9d0b92d03" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml conv1: out_channels: 6 kernel_size: 5 conv2: in_channels: 6 out_channels: 16 kernel_size: 5 pool: kernel_size: 2 stride: 2 fc1: in_features: 400 out_features: 120 fc2: in_features: 120 # Note that we do not need to include the activation function ('act'), since it does not have any # required parameters. We could, however, override the type of the activation function itself # as follows: # act: # class: !!python/name:torch.nn.Tanh # + [markdown] id="ajtknZtaqd4H" colab_type="text" # We are now ready to load the file into a `Configuration` object and use it to configure our network: # + id="RtoXqZwm0_Bi" colab_type="code" outputId="99822892-815e-4b00-a093-1529c29ffd7b" colab={"base_uri": "https://localhost:8080/", "height": 55} cfg = Configuration.from_yaml_file('config.yaml') cfg # + id="7UKki67T2joX" colab_type="code" outputId="ed4b6f05-1ae7-457f-e929-53db80f7635f" colab={"base_uri": "https://localhost:8080/", "height": 160} cfg.configure(Net) # + [markdown] id="5YsfvyE23lAw" colab_type="text" # ## Nested configurables # One of the most useful features of Confugue is that `@configurable` classes and functions can use other configurables, and the structure of the configuration file will naturally follow this hierarchy. To see this in action, we are going to write a configurable `main` function which trains our simple model on the CIFAR-10 dataset. 
# + id="u6D1GX-v4T3Q" colab_type="code" colab={} import torchvision from torchvision import transforms @configurable def main(num_epochs=1, log_period=2000, *, _cfg): net = _cfg['net'].configure(Net) criterion = _cfg['loss'].configure(nn.CrossEntropyLoss) optimizer = _cfg['optimizer'].configure(torch.optim.SGD, params=net.parameters(), lr=0.001) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_data = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) train_loader = _cfg['data_loader'].configure(torch.utils.data.DataLoader, dataset=train_data, batch_size=4, shuffle=True, num_workers=2) for epoch in range(num_epochs): for i, batch in enumerate(train_loader): inputs, labels = batch optimizer.zero_grad() loss = criterion(net(inputs), labels) loss.backward() optimizer.step() if (i + 1) % log_period == 0: print(i + 1, loss.item()) # + id="iLv8a7xA7tGp" colab_type="code" outputId="17cb3a81-191b-45c2-a350-a2c226377de7" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml net: conv1: out_channels: 6 kernel_size: 5 conv2: in_channels: 6 out_channels: 16 kernel_size: 5 pool: kernel_size: 2 stride: 2 fc1: in_features: 400 out_features: 120 fc2: in_features: 120 optimizer: class: !!python/name:torch.optim.Adam data_loader: batch_size: 8 num_epochs: 2 log_period: 1000 # + id="ij_Nfdz58IsD" colab_type="code" outputId="169c41aa-155d-4e7f-ff91-7cc12a767c1e" colab={"base_uri": "https://localhost:8080/", "height": 317, "referenced_widgets": ["e8dfe5ac9b2b4b7d9ba17ffac7599253", "42c58d2d87df48daa37dad3ab75d901f", "39bbf8a2c9fd40e6906345a5ba9d3b3b", "10fd57a913d44b40b30f3d05e29deebc", "69ff781fd2914c10a6eb98df0282fce0", "0a548709d6f84ec69127d8c1545da2bf", "f8a3dac5261f49a689098ebe67229aec", "21c76d27169942bcbc3f9e7fe962ac91"]} cfg = Configuration.from_yaml_file('config.yaml') cfg.configure(main) # + [markdown] id="JlyBZSNAC5UK" 
colab_type="text" # ## Configuring lists # The `configure_list` method allows us to configure a list of objects, with the parameters for each supplied from the configuration file. We are going to use this, in conjunction with `nn.Sequential`, to fully specify the model in the configuration file, so we won't need our `Net` class anymore. # + id="ZOr2UMx1DC9B" colab_type="code" outputId="554cdad3-c308-4ffb-a8dd-715c0ef62c71" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml layers: - class: !!python/name:torch.nn.Conv2d in_channels: 3 out_channels: 6 kernel_size: 5 - class: !!python/name:torch.nn.ReLU - class: !!python/name:torch.nn.MaxPool2d kernel_size: 2 stride: 2 - class: !!python/name:torch.nn.Conv2d in_channels: 6 out_channels: 16 kernel_size: 5 - class: !!python/name:torch.nn.ReLU - class: !!python/name:torch.nn.MaxPool2d kernel_size: 2 stride: 2 - class: !!python/name:torch.nn.Flatten - class: !!python/name:torch.nn.Linear in_features: 400 out_features: 120 - class: !!python/name:torch.nn.ReLU - class: !!python/name:torch.nn.Linear in_features: 120 out_features: 10 # + [markdown] id="rKF8QjVPyssu" colab_type="text" # Creating the model then becomes a matter of two lines of code: # + id="VT9g-sOsEAYk" colab_type="code" outputId="01731ee6-c9ad-40f0-e171-93155e8f9a81" colab={"base_uri": "https://localhost:8080/", "height": 229} cfg = Configuration.from_yaml_file('config.yaml') nn.Sequential(*cfg['layers'].configure_list()) # + [markdown] id="vdCdiFT2y00-" colab_type="text" # This offers a lot of flexibility, but it should be used with care. If your configuration file is longer than your code, you might be overusing it. # + [markdown] id="VfcYJa0mzuLe" colab_type="text" # ## Further reading # Confugue offers a couple more useful features, which are described [in the documentation](https://confugue.readthedocs.io/en/latest/more-features.html). 
You can also check out the [API reference](https://confugue.readthedocs.io/en/latest/api.html).
docs/pytorch_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: OLCF-base (ipykernel) # language: python # name: python3 # --- # # Dataset E: 100 hosts sample (among 4,626 hosts) for all dates # + [markdown] tags=[] # # Dask Setup # - # # workers x memory_per_worker <= available memory # threads per worker == 1 if workload is CPU intensive # dashboard port might need to change if running multiple dask instances within lab # # Sizing below is based on the basic jupyterlab environment provided by https://jupyter.olcf.ornl.gov # WORKERS = 16 MEMORY_PER_WORKER = "2GB" THREADS_PER_WORKER = 1 DASHBOARD_PORT = ":8787" # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Local Dask cluster setup # # * Install bokeh, spawn cluster, provide access point to dashboards # * Access jupyter hub at the address - https://jupyter.olcf.ornl.gov/hub/user-redirect/proxy/8787/status") # * Or access point for the Dask jupyter extension - /proxy/8787 # + tags=[] # General prerequisites we want to have loaded from the get go # !pip install bokeh loguru # - # Cleanup try: client.shutdown() client.close() except Exception as e: pass # + # Setup block import os import pwd import glob import pandas as pd from distributed import LocalCluster, Client import dask import dask.dataframe as dd #LOCALDIR = "/gpfs/alpine/stf218/scratch/shinw/.tmp/dask-interactive" LOCALDIR = "/tmp/dask" # - dask.config.set({'worker.memory': {'target': False, 'spill': False, 'pause': 0.8, 'terminate': 0.95}}) #dask.config.config # + # Cluster creation cluster = LocalCluster(processes=True, n_workers=WORKERS, threads_per_worker=THREADS_PER_WORKER, dashboard_address=DASHBOARD_PORT, local_directory=LOCALDIR, memory_limit=MEMORY_PER_WORKER) client = Client(cluster) cluster print("Access jupyter hub at the address - https://jupyter.olcf.ornl.gov/hub/user-redirect/proxy/8787/status") print("Dask jupyter 
extension - /proxy/8787") client # + [markdown] tags=[] # # Preloading tools & libraries # + tags=[] import sys import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd import numpy as np import seaborn as sns print("seaborn version: {}".format(sns.__version__)) print("Python version:\n{}\n".format(sys.version)) print("matplotlib version: {}".format(matplotlib.__version__)) print("pandas version: {}".format(pd.__version__)) print("numpy version: {}".format(np.__version__)) # - # # File locations DATA_BASE_PATH = "../data" INPUT_FILES = f"{DATA_BASE_PATH}/powtemp_10sec_mean/**/*.parquet" INPUT_PATH = f"{DATA_BASE_PATH}/powtemp_10sec_mean" OUTPUT_PATH = f"{DATA_BASE_PATH}/e_full_10sec_100hosts" # + tags=[] # !ls {INPUT_FILES} # + [markdown] tags=[] # # Schema Globals # # Schema related global variables # - # Developing a COLUMN filter we can use to process the data RAW_COLUMN_FILTER = [ # Meta information 'timestamp', 'node_state', 'hostname', # Node input power (power supply) 'ps0_input_power', 'ps1_input_power', # Power consumption (Watts) # - GPU power 'p0_gpu0_power', 'p0_gpu1_power', 'p0_gpu2_power', 'p1_gpu0_power', 'p1_gpu1_power', 'p1_gpu2_power', # - CPU power 'p0_power', 'p1_power', # Thermal (Celcius) # - V100 core temperature 'gpu0_core_temp', 'gpu1_core_temp', 'gpu2_core_temp', 'gpu3_core_temp', 'gpu4_core_temp', 'gpu5_core_temp', # - V100 mem temperature (HBM memory) 'gpu0_mem_temp', 'gpu1_mem_temp', 'gpu2_mem_temp', 'gpu3_mem_temp', 'gpu4_mem_temp', 'gpu5_mem_temp', # - CPU core temperatures 'p0_core0_temp', 'p0_core1_temp', 'p0_core2_temp', 'p0_core3_temp', 'p0_core4_temp', 'p0_core5_temp', 'p0_core6_temp', 'p0_core7_temp', 'p0_core8_temp', 'p0_core9_temp', 'p0_core10_temp', 'p0_core11_temp', 'p0_core12_temp', 'p0_core14_temp', 'p0_core15_temp', 'p0_core16_temp', 'p0_core17_temp', 'p0_core18_temp', 'p0_core19_temp', 'p0_core20_temp', 'p0_core21_temp', 'p0_core22_temp', 'p0_core23_temp', 
'p1_core0_temp', 'p1_core1_temp', 'p1_core2_temp', 'p1_core3_temp', 'p1_core4_temp', 'p1_core5_temp', 'p1_core6_temp', 'p1_core7_temp', 'p1_core8_temp', 'p1_core9_temp', 'p1_core10_temp', 'p1_core11_temp', 'p1_core12_temp', 'p1_core14_temp', 'p1_core15_temp', 'p1_core16_temp', 'p1_core17_temp', 'p1_core18_temp', 'p1_core19_temp', 'p1_core20_temp', 'p1_core21_temp', 'p1_core22_temp', 'p1_core23_temp', ] # Column lists we actually end up using COLS = [ # Meta information 'timestamp', 'node_state', 'hostname', # Node input power (power supply) 'ps0_input_power', 'ps1_input_power', # Power consumption (Watts) # - GPU power 'p0_gpu0_power', 'p0_gpu1_power', 'p0_gpu2_power', 'p1_gpu0_power', 'p1_gpu1_power', 'p1_gpu2_power', # - CPU power 'p0_power', 'p1_power', # Thermal (Celcius) # - V100 core temperature 'gpu0_core_temp', 'gpu1_core_temp', 'gpu2_core_temp', 'gpu3_core_temp', 'gpu4_core_temp', 'gpu5_core_temp', # - V100 mem temperature (HBM memory) 'gpu0_mem_temp', 'gpu1_mem_temp', 'gpu2_mem_temp', 'gpu3_mem_temp', 'gpu4_mem_temp', 'gpu5_mem_temp', ] # + # Columns in order to calculate the row-wise min,max,mean P0_CORES = ["p0_core0_temp", "p0_core1_temp", "p0_core2_temp", "p0_core3_temp", "p0_core4_temp", "p0_core5_temp", "p0_core6_temp", "p0_core7_temp", "p0_core8_temp", "p0_core9_temp", "p0_core10_temp", "p0_core11_temp", "p0_core12_temp", #"p0_core13_temp", "p0_core14_temp", "p0_core15_temp", "p0_core16_temp", "p0_core17_temp", "p0_core18_temp", "p0_core19_temp", "p0_core20_temp", "p0_core21_temp", "p0_core22_temp", "p0_core23_temp"] P1_CORES = ["p1_core0_temp", "p1_core1_temp", "p1_core2_temp", "p1_core3_temp", "p1_core4_temp", "p1_core5_temp", "p1_core6_temp", "p1_core7_temp", "p1_core8_temp", "p1_core9_temp", "p1_core10_temp", "p1_core11_temp", "p1_core12_temp", #"p1_core13_temp", "p1_core14_temp", "p1_core15_temp", "p1_core16_temp", "p1_core17_temp", "p1_core18_temp", "p1_core19_temp", "p1_core20_temp", "p1_core21_temp", "p1_core22_temp", "p1_core23_temp"] # + 
[markdown] tags=[] # # Sampling & coarsening the data and creating a sampled dataset # # Utilize map partitions feature and create a few samples from 4,626 nodes in 1 minute increments. # Trying to see if we can randomize from the partitions as well to reduce the I/O happening. # + # Definition of the whole pipeline import os import shutil import random import glob def find_work_to_do(output_path, input_path): return [ os.path.basename(file).split(".")[0] for file in sorted(glob.glob(f"{input_path}/**/*.parquet")) if not os.access( os.path.join( output_path, os.path.basename(file) ), os.F_OK ) ] def handle_part(df): # Aggregate core temp df['p0_temp_max'] = df.loc[:,tuple(P0_CORES)].max(axis=1) df['p0_temp_min'] = df.loc[:,tuple(P0_CORES)].min(axis=1) df['p0_temp_mean'] = df.loc[:,tuple(P0_CORES)].mean(axis=1) df['p1_temp_max'] = df.loc[:,tuple(P1_CORES)].max(axis=1) df['p1_temp_min'] = df.loc[:,tuple(P1_CORES)].min(axis=1) df['p1_temp_mean'] = df.loc[:,tuple(P1_CORES)].mean(axis=1) COL_LIST = COLS + ['p0_temp_max', 'p0_temp_mean', 'p0_temp_min', 'p1_temp_max', 'p1_temp_mean', 'p1_temp_min'] return df.loc[:, tuple(COL_LIST)] def sample_hosts(output_path, input_path, hostnames=[], nhosts=1): # Limiting the # of files work_to_do = find_work_to_do(output_path, input_path) print(work_to_do) # Get random hostnames if hostnames == []: files = sorted(glob.glob(f"{input_path}/**/*.parquet")) ddf = dd.read_parquet( files[0], index=False, columns=RAW_COLUMN_FILTER, engine="pyarrow", split_row_groups=True, gather_statistics=True) df = ddf.get_partition(0).compute().set_index('hostname') hostnames = random.sample(df.index.unique().to_list(), nhosts) del ddf del df with open(f"{output_path}/hosts.txt", "w") as f: for host in sorted(hostnames): f.write(f"{host}\r\n") for date_key in work_to_do: print(f" - sample day working on {date_key}") month_key = date_key[0:6] day_input_path = f"{input_path}/{month_key}/{date_key}.parquet" day_output_path = 
f"{output_path}/{date_key}.parquet" print(f"Day output path {day_output_path}") os.makedirs(os.path.dirname(day_output_path), exist_ok=True) ddf = dd.read_parquet( [day_input_path], index=False, columns=RAW_COLUMN_FILTER, engine="pyarrow", split_row_groups=True, gather_statistics=True) # Get only the hosts we are interested hostname_mask = ddf['hostname'].isin(hostnames) # Calculate the aggregates and dump the result df = ddf[hostname_mask].map_partitions(handle_part).compute() # Sort the day before sending it out df = df.sort_values(['hostname', 'timestamp']) # Write to the final file df.to_parquet(day_output_path, engine="pyarrow") # + tags=[] sample_hosts(OUTPUT_PATH, INPUT_PATH, nhosts=100) # - # # Testing the output data # + import glob import pandas as pd def get_host_dataframe( input_path = OUTPUT_PATH, hostnames = [], months = ["202001", "202008", "202102", "202108", "202201"], sort_values=["hostname", "timestamp"], set_index=["hostname"], columns=None, ): print(f"[reading time series for {hostnames} during {months}]") if columns != None: if "hostname" not in columns: columns.push("hostname") if "timestamp" not in columns: columns.push("timestamp") # Iterate all the files and fetch data for only the hostnames we're interested df_list = [] for month in months: print(f"- reading {month}") files = sorted(glob.glob(f"{input_path}/{month}*.parquet")) for file in files: df = pd.read_parquet(file, engine="pyarrow", columns=columns) if hostnames != []: mask = df['hostname'].isin(hostnames) df_list.append(df[mask]) else: df_list.append(df) print("- merging dataframe") df = pd.concat(df_list).reset_index(drop=True) print(f"- sorting based on {sort_values}") if sort_values != []: df = df.sort_values(sort_values) if set_index != []: df = df.set_index(set_index) print("- read success") return df # - df = get_host_dataframe(hostnames = ['f04n08', 'e34n12'], columns=["timestamp", "hostname", "ps0_input_power", "ps1_input_power"]) df.info() df
src/e_full_10sec_100hosts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.2 64-bit (''venv'': venv)' # name: python382jvsc74a57bd0d46d14bfc67a8d69f82f77af2e60e5b02fcabf25a759a56cf61072a2d590541d # --- import pandas as pd ratings = pd.read_csv("/Users/alexigna/projects/skillfactory/unit1/00_ratings.csv") movies = pd.read_csv("/Users/alexigna/projects/skillfactory/unit1/00_movies.csv") # # Задание 3 # Сколько раз была выставлена низшая оценка 0.5 в наших рейтингах? Используйте файл ratings.csv. ratings[ratings["rating"]==ratings["rating"].min()].count() # # Задание 4 # Объедините датафреймы ratings и movies, используя параметр how='outer'. # Сколько строк в получившемся датафрейме? merged = ratings.merge(movies, on="movieId", how="outer") merged.info() # # Задание 5 # Найдите в датафрейме movies фильм с movieId=3456. # Какой у него год выпуска? # P. S. Попробуйте найти movieId этого фильма в датафрейме ratings. Это будет ключом к ответу на следующее задание. # # movies[movies["movieId"]==3456] ratings[ratings["movieId"]==3456]
unit_1/python-7.15.ipynb
# # Models (TensorFlow) # Install the Transformers and Datasets libraries to run this notebook. # !pip install datasets transformers[sentencepiece] # + from transformers import BertConfig, TFBertModel # Building the config config = BertConfig() # Building the model from the config model = TFBertModel(config) # - print(config) # + from transformers import BertConfig, TFBertModel config = BertConfig() model = TFBertModel(config) # Model is randomly initialized! # + from transformers import TFBertModel model = TFBertModel.from_pretrained("bert-base-cased") # - model.save_pretrained("directory_on_my_computer") sequences = ["Hello!", "Cool.", "Nice!"] encoded_sequences = [ [101, 7592, 999, 102], [101, 4658, 1012, 102], [101, 3835, 999, 102], ] # + import tensorflow as tf model_inputs = tf.constant(encoded_sequences) # - output = model(model_inputs)
notebooks/course/chapter2/section3_tf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import pandas as pd, imp from sqlalchemy import create_engine # # Update TOC trends analysis # # Tore has previously written code to calculate [Mann-Kendall (M-K)](https://cran.r-project.org/web/packages/trend/vignettes/trend.pdf) trend statistics and [Sen's slope](https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator) estimates for data series in RESA2. According to my notes from a meeting with Tore on 13/05/2016, the workflow goes something like this: # # 1. Run code to extract and summarise time series from RESA2, insert this data into *Mann-Kendall_Sen.xls*, then read the results back into a new table in RESA2 called e.g. `ICPW_STATISTICS`. <br><br> # # 2. Run the `ICPStat` query in *Icp-waters2001_2000.accdb* to summarise the data in `ICPW_STATISTICS`. This creates a new table currently called `aaa`, but Tore says he'll rename it to something more descriptive before he leaves. <br><br> # # 3. Run the `export()` subroutine in the `Export` module of *Icp-waters2001_2000.accdb* to reformat the `aaa` table and write the results to an Excel file. # # *Mann-Kendall_Sen.xls* is an early version of the popular Excel macro **MULTIMK/CONDMK**, which Tore has modified slightly for use in this analysis. (A more recent version of the same file is available [here](http://taurus.gg.bg.ut.ee/jaagus/MKtingtsirk.xls)). This Excel macro permits some quite sophisticated multivariate and conditional analyses, but as far as I can tell the TOC trends code is only making use of the most basic functionality - performing repeated independent trend tests on annually summarised time series. 
# # Unfortunately, although the workflow above makes sense, I've so far failed to find and run Tore's code for step 1 (I can find everything for steps 2 and 3, but not the code for interacting with the Excel workbook). It also seems a bit messy to be switching back and forth between RESA2, Excel and Access in this way, so the code here is a first step towards refactoring the whole analysis into Python.
#
# ## 1. Test data
#
# The *Mann-Kendall_Sen.xls* file on the network already had some example ICPW data in it, which I can use to test my code. The raw input data and the results obtained from the Excel macro are saved as *mk_sen_test_data.xlsx*.

# +
# Read data and results from the Excel macro
# NOTE(review): 'sheetname' is the pre-0.21 pandas keyword; newer pandas
# renamed it to 'sheet_name'. Left as-is to match the environment this
# notebook was written for -- confirm the installed pandas version before
# changing it.
in_xlsx = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
           r'\Data\mk_sen_test_data.xlsx')

raw_df = pd.read_excel(in_xlsx, sheetname='input')
res_df = pd.read_excel(in_xlsx, sheetname='results')

raw_df
# -

res_df

# ## 2. Statistical functions
#
# Looking at the output in the `ICPW_STATISTICS3` table of RESA2, we need to calculate the following statistics (only some of which are output by the Excel macro):
#
# * Number of non-missing values
# * Median
# * Mean
# * Period over which data are available (start and end years)
# * Standard deviation (of the data)
# * Standard deviation (expected under the null hypothesis of the M-K test)
# * M-K statistic
# * Normalised M-K statistic $\left(= \frac{M-K \; statistic}{Standard \; deviation} \right)$
# * M-K p-value
# * Sen's slope (a.k.a. the Theil-Sen slope)
#
# Most of these should be quite straightforward. We'll start off by defining a function to calculate the M-K statistic (note that Scipy already has a function for the Theil-Sen slope). We'll also define another function to bundle everything together and return a dataframe of the results.

# +
def mk_test(x, stn_id, par, alpha=0.05):
    """ Perform the Mann-Kendall (M-K) test for monotonic trends.

        Adapted from http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
        by <NAME>.

        Uses the "normal approximation" to determine significance and
        therefore should only be used if the number of values is >= 10.

    Args:
        x:      1D array of data
        stn_id: Station ID (used only in the low-sample warning message)
        par:    Parameter name (used only in the low-sample warning message)
        alpha:  Significance level

    Returns:
        var_s: Variance of test statistic
        s:     M-K test statistic
        z:     Normalised test statistic
        p:     p-value of the significance test
        trend: Whether to reject the null hypothesis (no trend) at the
               specified significance level. One of: 'increasing',
               'decreasing' or 'no trend'
    """
    import numpy as np
    from scipy.stats import norm

    n = len(x)

    if n < 10:
        print (' Data series for %s at site %s has fewer than 10 non-null values. '
               'Significance estimates may be unreliable.' % (par, int(stn_id)))

    # calculate S
    # BUG FIX: the original used xrange, which only exists on Python 2.
    # range behaves identically for iteration on both Python 2 and 3.
    s = 0
    for k in range(n-1):
        for j in range(k+1, n):
            s += np.sign(x[j] - x[k])

    # calculate the unique data
    unique_x = np.unique(x)
    g = len(unique_x)

    # calculate the var(s)
    if n == g: # there is no tie
        var_s = (n*(n-1)*(2*n+5))/18.
    else: # there are some ties in data
        tp = np.zeros(unique_x.shape)
        for i in range(len(unique_x)):
            tp[i] = sum(unique_x[i] == x)
        # Sat Kumar's code has "+ np.sum", which is incorrect
        var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.

    # Normalise S with a continuity correction (shrink |s| towards 0 by 1)
    if s > 0:
        z = (s - 1)/np.sqrt(var_s)
    elif s == 0:
        z = 0
    elif s < 0:
        z = (s + 1)/np.sqrt(var_s)
    else:
        z = np.nan

    # calculate the p_value
    p = 2*(1-norm.cdf(abs(z))) # two tail test
    h = abs(z) > norm.ppf(1-alpha/2.)

    if (z < 0) and h:
        trend = 'decreasing'
    elif (z > 0) and h:
        trend = 'increasing'
    elif np.isnan(z):
        trend = np.nan
    else:
        trend = 'no trend'

    return var_s, s, z, p, trend

def wc_stats(raw_df, st_yr=None, end_yr=None):
    """ Calculate key statistics for the TOC trends analysis:

            'station_id'
            'par_id'
            'non_missing'
            'median'
            'mean'
            'std_dev'
            'period'
            'mk_std_dev'
            'mk_stat'
            'norm_mk_stat'
            'mk_p_val'
            'trend'
            'sen_slp'

    Args:
        raw_df: Dataframe with annual data for a single station. Columns must
                be: [station_id, year, par1, par2, ... parn]
        st_yr:  First year to include in analysis. Pass None to start
                at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to run to
                the end of the series

    Returns:
        df of key statistics.
    """
    import numpy as np, pandas as pd
    from scipy.stats import theilslopes

    # Checking
    df = raw_df.copy()
    assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'
    assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'

    # Get just the period of interest
    if st_yr:
        df = df.query('YEAR >= @st_yr')
    if end_yr:
        df = df.query('YEAR <= @end_yr')

    # Get stn_id
    stn_id = df['STATION_ID'].iloc[0]

    # Tidy up df
    df.index = df['YEAR']
    df.sort_index(inplace=True)
    del df['STATION_ID'], df['YEAR']

    # Container for results
    data_dict = {'station_id':[],
                 'par_id':[],
                 'non_missing':[],
                 'median':[],
                 'mean':[],
                 'std_dev':[],
                 'period':[],
                 'mk_std_dev':[],
                 'mk_stat':[],
                 'norm_mk_stat':[],
                 'mk_p_val':[],
                 'trend':[],
                 'sen_slp':[]}

    # Loop over pars
    for col in df.columns:
        # 1. Station ID
        data_dict['station_id'].append(stn_id)

        # 2. Par ID
        data_dict['par_id'].append(col)

        # 3. Non-missing
        data_dict['non_missing'].append(pd.notnull(df[col]).sum())

        # 4. Median
        data_dict['median'].append(df[col].median())

        # 5. Mean
        data_dict['mean'].append(df[col].mean())

        # 6. Std dev
        data_dict['std_dev'].append(df[col].std())

        # 7. Period
        # Use local names here rather than re-assigning the st_yr/end_yr
        # parameters (the original shadowed them, which was harmless but
        # confusing). 'period' reports the years for which data actually
        # exist, which may be narrower than the requested window.
        per_st = df.index.min()
        per_end = df.index.max()
        per = '%s-%s' % (per_st, per_end)
        data_dict['period'].append(per)

        # 8. M-K test
        # Drop missing values
        mk_df = df[[col]].dropna(how='any')

        # Only run stats if more than 1 valid value
        if len(mk_df) > 1:
            var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)
            data_dict['mk_std_dev'].append(np.sqrt(var_s))
            data_dict['mk_stat'].append(s)
            data_dict['norm_mk_stat'].append(z)
            data_dict['mk_p_val'].append(p)
            data_dict['trend'].append(trend)

            # 9. Sen's slope (the original comment mis-numbered this as 8)
            # First element of output gives median slope. Other results could
            # also be useful - see docs
            sslp = theilslopes(mk_df[col].values, mk_df.index, 0.95)[0]
            data_dict['sen_slp'].append(sslp)

        # Otherwise all NaN
        else:
            for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat',
                        'mk_p_val', 'trend', 'sen_slp']:
                data_dict[par].append(np.nan)

    # Build to df
    res_df = pd.DataFrame(data_dict)
    res_df = res_df[['station_id', 'par_id', 'period', 'non_missing',
                     'mean', 'median', 'std_dev', 'mk_stat', 'norm_mk_stat',
                     'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']]

    return res_df
# -

# ## 3. Perform comparison

# Run analysis on test data and print results
out_df = wc_stats(raw_df)
del out_df['station_id']
out_df

# And below is the output from the Excel macro for comparison.

res_df

# My code gives near-identical results to those from the Excel macro, although there are a few edge cases that might be worth investigating further. For example, if there are fewer than 10 non-null values, my code currently prints a warning. I'm not sure exactly what the Excel macro does yet, but in general it seems that for fewer than 10 values it's necessary to use a lookup table (see e.g. the `Instructions` sheet of the file [here](https://www.google.no/url?sa=t&rct=j&q=&esrc=s&source=web&cd=4&cad=rja&uact=8&ved=0ahUKEwia2cy5t_LNAhVECpoKHVngCqsQFggtMAM&url=https%3A%2F%2Fwww.researchgate.net%2Ffile.PostFileLoader.html%3Fid%3D55bba3666225ff21e88b4569%26assetKey%3DAS%253A273823084023809%25401442295918401&usg=AFQjCNGHCJHO6ab7otL2RMzw9zh7eaqTDg&sig2=sbLmEgIlfwOzJqOKO3gq-g&bvm=bv.126993452,d.bGs)).
#
# ## 4. Get data from RESA2
#
# The next step is to read the correct data directly from RESA2 and summarise it to look like `raw_df`, above. Start off by connecting to the database.
# + # Use custom RESA2 function to connect to db r2_func_path = r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Upload_Template\useful_resa2_code.py' resa2 = imp.load_source('useful_resa2_code', r2_func_path) engine, conn = resa2.connect_to_resa2() # - # Looking at the `ICPW_STATISTICS` table in RESA2, it seems as though trends have been assessed for **14 parameters** and several **different time periods** for each site of interest. The length and number of time periods vary from site to site, so I'll need to **check with Heleen** regarding how these varaibles should be chosen. The 14 parameters are as follows: # # * ESO4 # * ESO4X # * ECl # * ESO4Cl # * TOC_DOC # * ECaEMg # * ECaXEMgX # * ENO3 # * Al # * ANC # * ALK # * HPLUS # * ESO4EClENO3 # * ENO3DIVENO3ESO4X # # Many of these quantities are unfamiliar to me, but presumably the equations for calculating them can be found in Tore's code (which I can't find at present). **Check with Heleen whether all of these are still required** and find equations as necessary. # # The other issue is how to aggregate the values in the database from their original temporal resolution to annual summaries. I assume the **median** annual value is probably appropriate in most cases, but it would be good to know what Tore did previosuly. # # For now, I'll focus on: # # 1. Extracting the data from the database for a specified time period, <br><br> # # 2. Calculating the required water chemistry parameters, <br><br> # # 3. Taking annual medians and <br><br> # # 4. Estimating the trend statistics. # # It should then be fairly easy to modify this code later as necessary. # # ### 4.1. Equations # # Some of the quantities listed above are straightforward to calculate. # # #### 4.1.1. 
Micro-equivalents per litre # # The Es in the parameter names are just unit conversions to micro-equivalents per litre: # # $$EPAR \; (\mu eq/l) = \frac{1.10^6 * valency}{molar \; mass \; (g/mol)} * PAR \; (g/l)$$ # # Molar masses and valencies for the key species listed above are given in the table below. # + # Tabulate chemical properties chem_dict = {'molar_mass':[96, 35, 40, 24, 14], 'valency':[2, 1, 2, 2, 1], 'resa2_ref_ratio':[0.103, 1., 0.037, 0.196, 'N/A']} chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N']) chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']] chem_df # - # #### 4.1.2. Sea-salt corrected values # # The Xs are sea-salt corrected values (also sometimes denoted with an asterisk e.g. Ca\*). They are calculated by comparison to chloride concentrations, which are generall assumed to be conservative. The usual equation is: # # $$PARX = PAR_{sample} - \left[ \left( \frac{PAR}{Cl} \right)_{ref} * Cl_{sample} \right]$$ # # where $PAR_{sample}$ and $Cl_{sample}$ are the concentrations measured in the lake or river and $\left( \frac{PAR}{Cl} \right)_{ref}$ is (ideally) the long-term average concentration in incoming rainwater. In some cases the reference values are simply taken from sea water concentrations (ignoring effects such as evaporative fractionation etc.). # # I'm not sure what values to assume, but by rearranging the above equations and applying it to data extarcted from RESA2 I can back-calculate the reference values. For example, brief testing using data from Italy, Switzerland and the Czech Republic implies that RESA2 uses a **standard reference value for sulphate of 0.103**. # # The reference ratios inferred from RESA2 for the key species listed are given in the table above. # # **NB:** In doing this I've identified some additional erros in the database, where this correction has not beeen performed correctly. 
For some reason, ESO4X values have been set to zero, despite valid ESO4 and ECl measurements being available. The problem only affects a handful od sample, but could be enough to generate false trends. **Return to this later?** # # **NB2:** Leah's experiences with the RECOVER project suggest that assuming a single reference concentration for all countires in the world is a bad idea. For example, I believe in e.g. the Czech Republic and Italy it is usual **not** to calculate sea-salt corrected concentrations at all, because most of the chloride input comes from industry rather than marine sources. Rainwater concentrations are also likely to vary dramatically from place to place, especially given the range of geographic and climatic conditions covered by this project. **Check with Heleen**. # # #### 4.1.3. ANC # # **Need to calculate this ANC, ALK, HPLUS and ENO3DIVENO3ESO4X.** # ### 4.2. Choose projects # # The first step is to specify a list of RESA2 projects and get the stations associated with them. # + # Get stations for a specified list of projects proj_list = ['ICPW_TOCTRENDS_2015_CZ', 'ICPW_TOCTRENDS_2015_IT'] sql = ('SELECT station_id, station_code ' 'FROM resa2.stations ' 'WHERE station_id IN (SELECT UNIQUE(station_id) ' 'FROM resa2.projects_stations ' 'WHERE project_id IN (SELECT project_id ' 'FROM resa2.projects ' 'WHERE project_name IN %s))' % str(tuple(proj_list))) stn_df = pd.read_sql(sql, engine) stn_df # - # ### 4.3. Extract time series # # The next step is to get time series for the desired parameters for each of these stations. 
# + # Specify parameters of interest par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC', 'Al'] if 'DOC' in par_list: print ('The database treats DOC and TOC similarly.\n' 'You should probably enter "TOC" instead') # Check pars are valid if len(par_list)==1: sql = ("SELECT * FROM resa2.parameter_definitions " "WHERE name = '%s'" % par_list[0]) else: sql = ('SELECT * FROM resa2.parameter_definitions ' 'WHERE name in %s' % str(tuple(par_list))) par_df = pd.read_sql_query(sql, engine) assert len(par_df) == len(par_list), 'One or more parameters not valid.' # + # Get results for ALL pars for sites and period of interest if len(stn_df)==1: sql = ("SELECT * FROM resa2.water_chemistry_values2 " "WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples " "WHERE station_id = %s)" % stn_df['station_id'].iloc[0]) else: sql = ("SELECT * FROM resa2.water_chemistry_values2 " "WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples " "WHERE station_id IN %s)" % str(tuple(stn_df['station_id'].values))) wc_df = pd.read_sql_query(sql, engine) # Get all sample dates for sites and period of interest if len(stn_df)==1: sql = ("SELECT water_sample_id, station_id, sample_date " "FROM resa2.water_samples " "WHERE station_id = %s " % stn_df['station_id'].iloc[0]) else: sql = ("SELECT water_sample_id, station_id, sample_date " "FROM resa2.water_samples " "WHERE station_id IN %s " % str(tuple(stn_df['station_id'].values))) samp_df = pd.read_sql_query(sql, engine) # Join in par IDs based on method IDs sql = ('SELECT * FROM resa2.wc_parameters_methods') meth_par_df = pd.read_sql_query(sql, engine) wc_df = pd.merge(wc_df, meth_par_df, how='left', left_on='method_id', right_on='wc_method_id') # Get just the parameters of interest wc_df = wc_df.query('wc_parameter_id in %s' % str(tuple(par_df['parameter_id'].values))) # Join in sample dates wc_df = pd.merge(wc_df, samp_df, how='left', left_on='sample_id', right_on='water_sample_id') # Join in parameter units sql = ('SELECT 
* FROM resa2.parameter_definitions') all_par_df = pd.read_sql_query(sql, engine) wc_df = pd.merge(wc_df, all_par_df, how='left', left_on='wc_parameter_id', right_on='parameter_id') # Join in station codes wc_df = pd.merge(wc_df, stn_df, how='left', left_on='station_id', right_on='station_id') # Convert units wc_df['value'] = wc_df['value'] * wc_df['conversion_factor'] # Extract columns of interest wc_df = wc_df[['station_id', 'sample_date', 'name', 'value']] # Unstack wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True) wc_df = wc_df.unstack(level='name') wc_df.columns = wc_df.columns.droplevel() wc_df.reset_index(inplace=True) wc_df.columns.name = None wc_df.head() # - # ### 4.4. Aggregate to annual # + # Extract year from date column wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year) del wc_df['sample_date'] # Groupby station_id and year grpd = wc_df.groupby(['station_id', 'year']) # Calculate median wc_df = grpd.agg('median') wc_df.head() # - # ### 4.4. Convert units and apply sea-salt correction # # I haven't calculated all 14 parameters here, as I'm not sure exactly what they all are. The ones I'm reasonably certain of are included below. # + # 1. Convert to ueq/l for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N']: val = chem_df.ix[par, 'valency'] mm = chem_df.ix[par, 'molar_mass'] if par == 'NO3-N': wc_df['ENO3'] = wc_df[par] * val / mm else: wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm # 2. Apply sea-salt correction for par in ['ESO4', 'EMg', 'ECa']: ref = chem_df.ix[par[1:], 'resa2_ref_ratio'] wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl']) # 3. Calculate combinations # 3.1. ESO4 + ECl wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl'] # 3.2. ECa + EMg wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg'] # 3.3. ECaX + EMgX wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX'] # 3.4. ESO4 + ECl + ENO3 wc_df['ESO4_ECl_ENO3'] = wc_df['ESO4'] + wc_df['ECl'] + wc_df['ENO3'] # 4. 
Delete unnecessary columns and tidy for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N']: del wc_df[col] wc_df.reset_index(inplace=True) wc_df.head() # - # ### 4.5. Calculate trends # + def process_water_chem_df(stn_df, wc_df, st_yr=None, end_yr=None): """ Calculate statistics for the stations, parameters and time periods specified. Args: stn_df: Dataframe of station_ids wc_df: Dataframe of water chemistry time series for stations and parameters of interest st_yr: First year to include in analysis. Pass None to start at the beginning of the series end_year: Last year to include in analysis. Pass None to start at the beginning of the series Returns: Dataframe of statistics """ # Container for output df_list = [] # Loop over sites for stn_id in stn_df['station_id']: # Extract data for this site df = wc_df.query('station_id == @stn_id') # Modify col names names = list(df.columns) names[:2] = ['STATION_ID', 'YEAR'] df.columns = names # Run analysis df_list.append(toc_stats(df, st_yr=st_yr, end_yr=end_yr)) res_df = pd.concat(df_list, axis=0) return res_df res_df = process_water_chem_df(stn_df, wc_df) res_df.head() # - # ## 5. Compare to previous trends analysis # # This seems to be working OK so far, but I need to do some more testing to see that my results more-or-less agree with those calculated previously by Tore. As a start, let's compare the results above with those in the `ICPW_STATISTICS3` table of RESA2, which is where (I think) Tore has saved his previous output. # + # Get results for test sites from RESA2 sql = ('SELECT * FROM resa2.icpw_statistics3 ' 'WHERE station_id IN %s' % str(tuple(stn_df['station_id'].values))) stat_df = pd.read_sql(sql, engine) # Get just the cols to compare to my output stat_df = stat_df[['station_id', 'parameter', 'period', 'nonmiss', 'average', 'median', 'stdev', 'test_stat', 'mk_stat', 'mkp', 'senslope']] stat_df.head(14).sort_values(by='parameter') # - # For e.g. 
site 23499, I can now re-run my code for the period from 1990 to 2004 and compare my results to those above. # + # Re-run python analysis for the period 1990 - 2004 res_df = process_water_chem_df(stn_df, wc_df, st_yr=1990, end_yr=2004) # Delete mk_std_dev as not relevant here del res_df['mk_std_dev'] res_df.head(14).sort_values(by='par_id') # - # **The numbers in the above two tables are *almost* identical**, which is actually pretty remarkable given that I'm second-guessing a lot of the decisions in Tore's analysis (having never seen his code) and also recoding everything from scratch. It certainly looks as though this will be a viable alternative for re-running the trends analysis. # # There are still some loose ends to tie up. In particular, I need to add a few more parameters to the trends calculation, but that shouldn't be difficult once I've spoken to Heleen to find out what they are and how to calculate them. In the meantime, I'm sufficiently happy with this output to move the code into a separate module and then continue to explore the data in a new notebook. # # **NB:** A nice way to visualise these results would be to create a google map, where each point is coloured according to 'increasing', 'decreasing' or 'no trend'. A pop-up on each point could then give the main summary statistics and a time series plot with the Sen's slope line overlaid. This would result in lots of points on top on each other, but users could filter the map to just show one parameter at a time to avoid things becoming too cluttered.
updated_toc_trends_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.2 64-bit
#     language: python
#     name: python3
# ---

# CONSTANTS
DATA_PATH = '../../data/Regression/raw/insurance.csv'
EXPORT_DIR = '../../data/Regression/processed/'
EXPORT_PATH = '../../data/Regression/processed/3_preprocessed_df.pkl'

# Load packages
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OrdinalEncoder
import os

# Load the raw insurance data
raw_df = pd.read_csv(DATA_PATH)

raw_df

# Encode the binary categoricals as 0/1 integers
raw_df['sex'] = raw_df['sex'].map({'male': 0, 'female': 1})
raw_df['smoker'] = raw_df['smoker'].map({'yes': 1, 'no': 0})

# Ordinal-encode the multi-valued 'region' column
encoder = OrdinalEncoder()
raw_df[['region']] = encoder.fit_transform(raw_df[['region']].astype(str))

raw_df

# Sanity-check the distributions of the discrete columns
print(raw_df['children'].value_counts())

print(raw_df['region'].value_counts())

# EXPORT DATA
# BUG FIX: os.mkdir() raises FileExistsError when the directory already
# exists (e.g. on notebook re-runs) and cannot create missing parent
# directories; os.makedirs(..., exist_ok=True) handles both cases.
os.makedirs(EXPORT_DIR, exist_ok=True)
# BUG FIX: the original called df.to_pickle(EXPORT_PATH), but no variable
# named `df` is defined anywhere in this notebook (NameError). The processed
# dataframe is `raw_df`.
raw_df.to_pickle(EXPORT_PATH)
notebooks/Regression/01preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import imblearn
import numpy as np
import pandas as pd

# # Loading data

# Column names for the KDD Cup '99 intrusion-detection dataset
names = ["duration","protocol_type","service","flag","src_bytes",
    "dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
    "logged_in","num_compromised","root_shell","su_attempted","num_root",
    "num_file_creations","num_shells","num_access_files","num_outbound_cmds",
    "is_host_login","is_guest_login","count","srv_count","serror_rate",
    "srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
    "diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
    "dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
    "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
    "dst_host_rerror_rate","dst_host_srv_rerror_rate","target"]

df = pd.read_csv('kddcup.csv', names=names)

# Drop two columns that carry no useful signal in this dataset
df.drop('num_outbound_cmds', axis=1, inplace=True)
df.drop('is_host_login', axis=1, inplace=True)

# Encode the three nominal features as integer category codes
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)

df.drop_duplicates(subset=None, keep='first', inplace=True)

df.shape

target_count = df.target.value_counts()
target_count

# # Random under-sampling

# BUG FIX: the original unpacked df.target.value_counts() positionally into
# 23 count_class_* variables, which silently assigns the wrong count to every
# name if the frequency ranking of the classes ever changes (different
# download, different de-duplication, ...). Only two of those counts are used
# below, so look them up by label instead; any other class size is available
# as counts[label].
counts = df.target.value_counts()
count_class_normal = counts['normal.']
count_class_neptune = counts['neptune.']

# Divide by class (one dataframe per attack label; only a few are used below,
# the rest are kept for interactive exploration)
df_normal = df[df['target'] == 'normal.']
df_neptune = df[df['target'] == 'neptune.']
df_back = df[df['target'] == 'back.']
df_teardrop = df[df['target'] == 'teardrop.']
df_satan = df[df['target'] == 'satan.']
df_warezclient = df[df['target'] == 'warezclient.']
df_ipsweep = df[df['target'] == 'ipsweep.']
df_smurf = df[df['target'] == 'smurf.']
df_portsweep = df[df['target'] == 'portsweep.']
df_pod = df[df['target'] == 'pod.']
df_nmap = df[df['target'] == 'nmap.']
df_guess_passwd = df[df['target'] == 'guess_passwd.']
df_buffer_overflow = df[df['target'] == 'buffer_overflow.']
df_warezmaster = df[df['target'] == 'warezmaster.']
df_land = df[df['target'] == 'land.']
df_imap = df[df['target'] == 'imap.']
df_rootkit = df[df['target'] == 'rootkit.']
df_loadmodule = df[df['target'] == 'loadmodule.']
df_ftp_write = df[df['target'] == 'ftp_write.']
df_multihop = df[df['target'] == 'multihop.']
df_phf = df[df['target'] == 'phf.']
df_perl = df[df['target'] == 'perl.']
df_spy = df[df['target'] == 'spy.']

# Down-sample the majority 'normal.' class to the size of 'neptune.'
df_normal_under = df_normal.sample(count_class_neptune)
df_test_under = pd.concat([df_normal_under, df_neptune], axis=0)

print('Random under-sampling:')
print(df_test_under.target.value_counts())

from matplotlib import pyplot as plt

my_colors = 'rgbkymc'
df_test_under.target.value_counts().plot(kind='bar', title='Count (target)', color=my_colors);
plt.show()

# # Random oversampling

# Up-sample 'neptune.' (with replacement) to the size of 'normal.'
df_neptune_over = df_neptune.sample(count_class_normal, replace=True)
df_test_over = pd.concat([df_normal, df_neptune_over], axis=0)

# +
print('Random over-sampling:')
print(df_test_over.target.value_counts())

df_test_over.target.value_counts().plot(kind='bar', title='Count (target)', color=my_colors);
plt.show()
# -

from collections import Counter
from imblearn.datasets import make_imbalance

# Build a small, heavily-imbalanced two-class problem from the two rarest
# root-compromise attacks
df_test = pd.concat([df_loadmodule, df_rootkit], axis=0)
df_test['target'] = df_test['target'].astype('category')
cat_columns = df_test.select_dtypes(['category']).columns
df_test[cat_columns] = df_test[cat_columns].apply(lambda x: x.cat.codes)
df_test.head()

data = df_test.values
Y = data[:,39]
X = data[:,0:39]

X.shape

Y = Y.reshape(-1, 1)

# +
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Standardise, project to 2 principal components, and re-attach the label
sScaler = StandardScaler()
rescaleX = sScaler.fit_transform(X)
pca = PCA(n_components=2)
rescaleX = pca.fit_transform(rescaleX)
rescaleX = np.append(rescaleX, Y, axis=1)
# -

principalDf = pd.DataFrame(data = rescaleX,
                           columns = ['principal component 1', 'principal component 2', 'target'])
principalDf.head()

import matplotlib.pyplot as plt
plt.clf()
plt.figure(figsize=(10,6))
plt.scatter(principalDf.iloc[:,0], principalDf.iloc[:,1])
plt.title('Principal component 1 vs 2')
plt.xlabel("pc 1")
plt.ylabel("pc 2")
plt.show()

data = principalDf.values

principalDf

label = data[:,2]
label

plt.clf()
plt.figure(figsize = (10, 6))
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()

data = principalDf.values
Y = data[:,2]
X = data[:,0:2]

# # Random under-sampling and over-sampling with imbalanced-learn
#

# +
from imblearn.under_sampling import RandomUnderSampler

# NOTE(review): return_indices/fit_sample are the legacy imbalanced-learn
# API (removed in newer releases in favour of fit_resample). Kept to match
# the environment this notebook was written for -- confirm installed version.
rus = RandomUnderSampler(return_indices=True)
X_rus, Y_rus, id_rus = rus.fit_sample(X, Y)

print('Removed indexes:', id_rus)
# -

Y_rus = Y_rus.reshape(-1, 1)
X_rus = np.append(X_rus, Y_rus, axis=1)

principalDf = pd.DataFrame(data = X_rus,
                           columns = ['principal component 1', 'principal component 2', 'target'])

plt.clf()
plt.figure(figsize = (10, 6))
# BUG FIX: these points come from the loadmodule/rootkit subset built above,
# but the original legend labelled them 'back' and 'neptune'
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.title('Principal component 1 vs 2')
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()

principalDf.groupby('target').size()

# +
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler()
X_ros, Y_ros = ros.fit_sample(X, Y)

print(X_ros.shape[0] - X.shape[0], 'new random picked points')
# -

Y_ros = Y_ros.reshape(-1, 1)
X_ros = np.append(X_ros, Y_ros, axis=1)

principalDf = pd.DataFrame(data = X_ros,
                           columns = ['principal component 1', 'principal component 2', 'target'])

plt.clf()
plt.figure(figsize = (10, 6))
# BUG FIX: legend labels corrected from 'back'/'neptune' -- the data is the
# loadmodule/rootkit subset
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.title('Principal component 1 vs 2')
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()

principalDf.groupby('target').size()

# # Under-sampling: Tomek links

# +
from imblearn.under_sampling import TomekLinks

# NOTE(review): return_indices/ratio are legacy imbalanced-learn keywords
# (newer releases use sampling_strategy and fit_resample)
tl = TomekLinks(return_indices=True, ratio='majority')
X_tl, Y_tl, id_tl = tl.fit_sample(X, Y)

print('Removed indexes:', id_tl)
# -

Y_tl.shape

Y_tl = Y_tl.reshape(-1, 1)
X_tl = np.append(X_tl, Y_tl, axis=1)

principalDf = pd.DataFrame(data = X_tl,
                           columns = ['principal component 1', 'principal component 2', 'target'])

plt.clf()
plt.figure(figsize = (10, 6))
# BUG FIX: legend labels corrected from 'back'/'neptune' -- the data is the
# loadmodule/rootkit subset
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.title('Principal component 1 vs 2')
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()

principalDf.groupby('target').size()

# # Under-sampling: Cluster Centroids

# +
from imblearn.under_sampling import ClusterCentroids

cc = ClusterCentroids(ratio={1: 968, 0: 500})
X_cc, Y_cc = cc.fit_sample(X, Y)
# -

X_cc.shape

Y_cc.shape

X_cc

Y_cc = Y_cc.reshape(-1, 1)
X_cc = np.append(X_cc, Y_cc, axis=1)

principalDf = pd.DataFrame(data = X_cc,
                           columns = ['principal component 1', 'principal component 2', 'target'])

principalDf.shape

plt.clf()
plt.figure(figsize = (10, 6))
# BUG FIX: legend labels corrected from 'back'/'neptune' -- the data is the
# loadmodule/rootkit subset
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.title('Principal component 1 vs 2')
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()

principalDf.groupby('target').size()

# # Over-sampling: SMOTE

X

# +
from imblearn.over_sampling import SMOTE

smote = SMOTE(ratio={0: 50, 1: 50})
X_sm, Y_sm = smote.fit_sample(X, Y)
# -

Y_sm.shape

Y_sm = Y_sm.reshape(-1, 1)
X_sm = np.append(X_sm, Y_sm, axis=1)

principalDf = pd.DataFrame(data = X_sm,
                           columns = ['principal component 1', 'principal component 2', 'target'])

plt.clf()
plt.figure(figsize = (10, 6))
names = ['loadmodule', 'rootkit']
colors = ['#1F77B4', '#FF7F0E']
markers = ['o', 's']
#label = numpy array of target column
plt.xlabel('pc1')
plt.ylabel('pc2')
for i in range(len(names)):
    bucket = principalDf[principalDf['target'] == i]
    bucket = bucket.iloc[:,[0,1]].values
    plt.scatter(bucket[:, 0], bucket[:, 1], c=colors[i], label=names[i], marker=markers[i])
plt.legend()
plt.show()
kdd resampling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ## 11.5 Data Reuse

# ### 11.5.1
#
# > Compute the ranks of each of the matrices in Fig. 11.20. Give both a maximal set of linearly independent columns and a maximal set of linearly independent rows.
#
# > a) $$
# \left [
# \begin{matrix}
# 0 & 1 & 5 \\
# 1 & 2 & 6 \\
# 2 & 3 & 7 \\
# 3 & 4 & 8 \\
# \end{matrix}
# \right ]
# $$

# Rank: 2
#
# Linearly independent rows:
# * $[0, 1, 5]$
# * $[1, 2, 6]$

# > b) $$
# \left [
# \begin{matrix}
# 1 & 2 & 3 & 4 \\
# 5 & 6 & 7 & 8 \\
# 9 & 10 & 12 & 15 \\
# 3 & 2 & 2 & 3 \\
# \end{matrix}
# \right ]
# $$

# Rank: 3
#
# Linearly independent rows:
# * $[1, 2, 3, 4]$
# * $[5, 6, 7, 8]$
# * $[3, 2, 2, 3]$

# > c) $$
# \left [
# \begin{matrix}
# 0 & 0 & 1 \\
# 0 & 1 & 1 \\
# 1 & 1 & 1 \\
# 5 & 6 & 3 \\
# \end{matrix}
# \right ]
# $$

# Rank: 3
#
# Linearly independent rows:
# * $[0, 0, 1]$
# * $[0, 1, 1]$
# * $[1, 1, 1]$

# ### 11.5.2
#
# > Find a basis for the null space of each matrix in Fig. 11.20.
#
# > a)
# $[4, -5, 1]$

# > b)
# $[-1, 3, -3, 1]$

# > c)
# The null space is trivial — since the rank (3) equals the number of columns, it contains only the zero vector, so its basis is the empty set.

# ### 11.5.5

# The vector $[1, 0, \dots, 0, 0]$ must be in the null space of the truncated matrix.
11/11.5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:notebook_test]
#     language: python
#     name: conda-env-notebook_test-py
# ---

# # Degradation example with clearsky workflow
#
#
# This Jupyter notebook is intended to illustrate the degradation analysis workflow. In addition, the notebook demonstrates the effects of changes in the workflow. For a consistent experience, we recommend installing the packages and versions documented in `docs/notebook_requirements.txt`. This can be achieved in your environment by running `pip install -r docs/notebook_requirements.txt` from the base directory. (RdTools must also be separately installed.)
#
# The degradation calculations consist of several steps illustrated here:
# <ol start="0">
# <li><b>Import and preliminary calculations</b></li>
# <li><b>Normalize</b> data using a performance metric</li>
# <li><b>Filter</b> data that creates bias</li>
# <li><b>Aggregate data</b></li>
# <li> <b>Analyze</b> aggregated data to estimate the degradation rate</li>
# </ol>
#
# After demonstrating these steps using sensor data, a modified version of the workflow is illustrated using modeled clear sky irradiance and temperature. The results from the two methods are then compared.
#
# This notebook works with public data from the Desert Knowledge Australia Solar Centre. Please download the site data from Site 12, and unzip the csv file in the folder:
# ./rdtools/docs/
#
# Note this example was run with data downloaded on Sept. 28, 2018. An older version of the data gave different sensor-based results. If you have an older version of the data and are getting different results, please try redownloading the data.
# # http://dkasolarcentre.com.au/historical-data/download # + from datetime import timedelta import pandas as pd import matplotlib.pyplot as plt import numpy as np import pvlib import rdtools # %matplotlib inline # This helps dates get plotted properly pd.plotting.register_matplotlib_converters() # - #Update the style of plots import matplotlib matplotlib.rcParams.update({'font.size': 12, 'figure.figsize': [4.5, 3], 'lines.markeredgewidth': 0, 'lines.markersize': 2 }) # # 0: Import and preliminary calculations # # # This section prepares the data necesary for an `rdtools` calculation. The first step of the `rdtools` workflow is normaliztion, which requires a time series of energy yield, a time series of cell temperature, and a time series of irradiance, along with some metadata (see Step 1: Normalize) # # The following section loads the data, adjusts units where needed, and renames the critical columns. The irradiance sensor data source is transposed to plane-of-array, and the temperature sensor data source is converted into estimated cell temperature. # # A common challenge is handling datasets with and without daylight savings time. Make sure to specify a `pytz` timezone that does or does not include daylight savings time as appropriate for your dataset. # # <b>The steps of this section may change depending on your data source or the system being considered. Note that nothing in this first section utlizes the `rdtools` library.</b> Transposition of irradiance and modeling of cell temperature are generally outside the scope of `rdtools`. A variety of tools for these calculations are avaialble in [`pvlib`](https://github.com/pvlib/pvlib-python). 
# + file_name = '84-Site_12-BP-Solar.csv' df = pd.read_csv(file_name) try: df.columns = [col.decode('utf-8') for col in df.columns] except AttributeError: pass # Python 3 strings are already unicode literals df = df.rename(columns = { u'12 BP Solar - Active Power (kW)':'power', u'12 BP Solar - Wind Speed (m/s)': 'wind', u'12 BP Solar - Weather Temperature Celsius (\xb0C)': 'Tamb', u'12 BP Solar - Global Horizontal Radiation (W/m\xb2)': 'ghi', u'12 BP Solar - Diffuse Horizontal Radiation (W/m\xb2)': 'dhi' }) # Specify the Metadata meta = {"latitude": -23.762028, "longitude": 133.874886, "timezone": 'Australia/North', "tempco": -0.005, "azimuth": 0, "tilt": 20, "pdc": 5100.0, "temp_model": 'open_rack_cell_polymerback'} df.index = pd.to_datetime(df.Timestamp) # TZ is required for irradiance transposition df.index = df.index.tz_localize(meta['timezone'], ambiguous = 'infer') # Explicitly trim the dates so that runs of this example notebook # are comparable when the sourec dataset has been downloaded at different times df = df['2008-11-11':'2017-05-15'] # Chage power from kilowatts to watts df['power'] = df.power * 1000.0 # There is some missing data, but we can infer the frequency from the first several data points freq = pd.infer_freq(df.index[:10]) # And then set the frequency of the dataframe df = df.resample(freq).median() # Calculate energy yield in Wh df['energy'] = df.power * pd.to_timedelta(df.power.index.freq).total_seconds()/(3600.0) # Calculate POA irradiance from DHI, GHI inputs loc = pvlib.location.Location(meta['latitude'], meta['longitude'], tz = meta['timezone']) sun = loc.get_solarposition(df.index) # calculate the POA irradiance sky = pvlib.irradiance.isotropic(meta['tilt'], df.dhi) df['dni'] = (df.ghi - df.dhi)/np.cos(np.deg2rad(sun.zenith)) beam = pvlib.irradiance.beam_component(meta['tilt'], meta['azimuth'], sun.zenith, sun.azimuth, df.dni) df['poa'] = beam + sky # Calculate cell temperature df_temp = pvlib.pvsystem.sapm_celltemp(df.poa, df.wind, 
df.Tamb, model = meta['temp_model']) df['Tcell'] = df_temp.temp_cell # plot the AC power time series fig, ax = plt.subplots(figsize=(4,3)) ax.plot(df.index, df.power, 'o', alpha = 0.01) ax.set_ylim(0,7000) fig.autofmt_xdate() ax.set_ylabel('AC Power (W)'); # - # # 1: Normalize # # Data normalization is achieved with `rdtools.normalize_with_pvwatts()`. We provide a time sereis of energy, along with keywords used to run a pvwatts model of the system. More information available in the docstring. # + # Specify the keywords for the pvwatts model pvwatts_kws = {"poa_global" : df.poa, "P_ref" : meta['pdc'], "T_cell" : df.Tcell, "G_ref" : 1000, "T_ref": 25, "gamma_pdc" : meta['tempco']} # Calculate the normaliztion, the function also returns the relevant insolation for # each point in the normalized PV energy timeseries normalized, insolation = rdtools.normalize_with_pvwatts(df.energy, pvwatts_kws) df['normalized'] = normalized df['insolation'] = insolation # Plot the normalized power time series fig, ax = plt.subplots() ax.plot(normalized.index, normalized, 'o', alpha = 0.05) ax.set_ylim(0,2) fig.autofmt_xdate() ax.set_ylabel('Normalized energy'); # - # # 2: Filter # # Data filtering is used to exclude data points that represent invalid data, create bias in the analysis, or introduce significant noise. # # It can also be useful to remove outages and outliers. Sometimes outages appear as low but non-zero yield. Automatic functions for this are not yet included in `rdtools`. Such filters should be implimented by the analyst if needed. 
# + # Calculate a collection of boolean masks that can be used # to filter the time series nz_mask = (df['normalized'] > 0) poa_mask = rdtools.poa_filter(df['poa']) tcell_mask = rdtools.tcell_filter(df['Tcell']) clip_mask = rdtools.clip_filter(df['power']) # filter the time series and keep only the columns needed for the # remaining steps filtered = df[nz_mask & poa_mask & tcell_mask & clip_mask] filtered = filtered[['insolation', 'normalized']] fig, ax = plt.subplots() ax.plot(filtered.index, filtered.normalized, 'o', alpha = 0.05) ax.set_ylim(0,2) fig.autofmt_xdate() ax.set_ylabel('Normalized energy'); # - # # 3: Aggregate # # Data is aggregated with an irradiance weighted average. This can be useful, for example with daily aggregation, to reduce the impact of high-error data points in the morning and evening. # + daily = rdtools.aggregation_insol(filtered.normalized, filtered.insolation, frequency = 'D') fig, ax = plt.subplots() ax.plot(daily.index, daily, 'o', alpha = 0.1) ax.set_ylim(0,2) fig.autofmt_xdate() ax.set_ylabel('Normalized energy'); # - # # 4: Degradation calculation # # Data is then analyzed to estimate the degradation rate representing the PV system behavior. The results are visualized and statistics are reported, including the 68.2% confidence interval, and the P95 exceedence value. # + # Calculate the degradation rate using the YoY method yoy_rd, yoy_ci, yoy_info = rdtools.degradation_year_on_year(daily, confidence_level=68.2) # Note the default confidence_level of 68.2 is approrpriate if you would like to # report a confidence interval analogous to the standard deviation of a normal # distribution. The size of the confidence interval is adjustable by setting the # confidence_level variable. 
# Visualize the results start = daily.index[0] end = daily.index[-1] years = (end - start).days / 365.0 yoy_values = yoy_info['YoY_values'] x = [start, end] y = [1, 1 + (yoy_rd * years)/100] fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10, 3)) ax2.hist(yoy_values, label='YOY', bins=len(yoy_values)//40) ax2.axvline(x=yoy_rd, color='black', linestyle='dashed', linewidth=3) ax2.set_xlim(-30,45) ax2.annotate( u' $R_{d}$ = %.2f%%/yr \n confidence interval: \n %.2f to %.2f %%/yr' %(yoy_rd, yoy_ci[0], yoy_ci[1]), xy=(0.5, 0.7), xycoords='axes fraction', bbox=dict(facecolor='white', edgecolor=None, alpha = 0)) ax2.set_xlabel('Annual degradation (%)'); ax1.plot(daily.index, daily/yoy_info['renormalizing_factor'], 'o', alpha = 0.5) ax1.plot(x, y, 'k--', linewidth=3) ax1.set_xlabel('Date') ax1.set_ylabel('Renormalized Energy') ax1.set_ylim(0.5, 1.1) fig.autofmt_xdate() fig.suptitle('Sensor-based degradation results'); # - # In addition to the confidence interval, the year-on-year method yields an exceedence value (e.g. P95), the degradation rate that was exceeded (slower degradation) with a given probability level. The probability level is set via the `exceedence_prob` keyword in `degradation_year_on_year`. print('The P95 exceedance level is %.2f%%/yr' % yoy_info['exceedance_level']) # # Clear sky workflow # The clear sky workflow is useful in that it avoids problems due to drift or recalibration of ground-based sensors. We use `pvlib` to model the clear sky irradiance. This is renormalized to align it with ground-based measurements. Finally we use `rdtools.get_clearsky_tamb()` to model the ambient temperature on clear sky days. This modeled ambient temperature is used to model cell temperature with `pvlib`. If high quality amabient temperature data is available, that can be used instead of the modeled ambient; we proceed with the modeled ambient temperature here for illustrative purposes. 
# # In this example, note that we have omitted wind data in the cell temperature calculations for illustrative purposes. Wind data can also be included when the data source is trusted for improved results # # **Note that the claculations below rely on some objects from the steps above** # # Clear Sky 0: Preliminary Calculations # + # Calculate the clear sky POA irradiance clearsky = loc.get_clearsky(df.index, solar_position = sun) cs_sky = pvlib.irradiance.isotropic(meta['tilt'], clearsky.dhi) cs_beam = pvlib.irradiance.beam_component(meta['tilt'], meta['azimuth'], sun.zenith, sun.azimuth, clearsky.dni) df['clearsky_poa'] = cs_beam + cs_sky # Renormalize the clear sky POA irradiance df['clearsky_poa'] = rdtools.irradiance_rescale(df.poa, df.clearsky_poa, method='iterative') # Calculate the clearsky temperature df['clearsky_Tamb'] = rdtools.get_clearsky_tamb(df.index, meta['latitude'], meta['longitude']) df_clearsky_temp = pvlib.pvsystem.sapm_celltemp(df.clearsky_poa, 0, df.clearsky_Tamb, model = meta['temp_model']) df['clearsky_Tcell'] = df_clearsky_temp.temp_cell # - # # Clear Sky 1: Normalize # Normalize as in step 1 above, but this time using clearsky modeled irradiance and cell temperature # + clearsky_pvwatts_kws = {"poa_global" : df.clearsky_poa, "P_ref" : meta['pdc'], "T_cell" :df.clearsky_Tcell, "G_ref" : 1000, "T_ref": 25, "gamma_pdc" : meta['tempco']} clearsky_normalized, clearsky_insolation = rdtools.normalize_with_pvwatts(df.energy, clearsky_pvwatts_kws) df['clearsky_normalized'] = clearsky_normalized df['clearsky_insolation'] = clearsky_insolation # - # # Clear Sky 2: Filter # Filter as in step 2 above, but with the addition of a clear sky index (csi) filter so we consider only points well modeled by the clear sky irradiance model. 
# + # Perform clearsky filter cs_nz_mask = (df['clearsky_normalized'] > 0) cs_poa_mask = rdtools.poa_filter(df['clearsky_poa']) cs_tcell_mask = rdtools.tcell_filter(df['clearsky_Tcell']) csi_mask = rdtools.csi_filter(df.insolation, df.clearsky_insolation) clearsky_filtered = df[cs_nz_mask & cs_poa_mask & cs_tcell_mask & clip_mask & csi_mask] clearsky_filtered = clearsky_filtered[['clearsky_insolation', 'clearsky_normalized']] # - # # Clear Sky 3: Aggregate # Aggregate the clear sky version of of the filtered data clearsky_daily = rdtools.aggregation_insol(clearsky_filtered.clearsky_normalized, clearsky_filtered.clearsky_insolation) # # Clear Sky 4: Degradation Calculation # Estimate the degradation rate and compare to the results obtained with sensors. In this case, we see that irradiance sensor drift may have biased the sensor-based results, a problem that is corrected by the clear sky approach. # + # Calculate the degradation rate using the YoY method cs_yoy_rd, cs_yoy_ci, cs_yoy_info = rdtools.degradation_year_on_year(clearsky_daily, confidence_level=68.2) # Note the default confidence_level of 68.2 is approrpriate if you would like to # report a confidence interval analogous to the standard deviation of a normal # distribution. The size of the confidence interval is adjustable by setting the # confidence_level variable. 
# Visualize the results cs_start = clearsky_daily.index[0] cs_end = clearsky_daily.index[-1] cs_years = (cs_end - cs_start).days / 365.0 cs_yoy_values = cs_yoy_info['YoY_values'] cs_x = [cs_start, cs_end] cs_y = [1, 1 + (cs_yoy_rd * cs_years)/100] fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10, 3)) ax2.hist(cs_yoy_values, label='YOY', bins=len(cs_yoy_values)//40, color = 'orangered') ax2.axvline(x=cs_yoy_rd, color='black', linestyle='dashed', linewidth=3) ax2.set_xlim(-30,45) ax2.annotate( u' $R_{d}$ = %.2f%%/yr \n confidence interval: \n %.2f to %.2f %%/yr' %(cs_yoy_rd, cs_yoy_ci[0], cs_yoy_ci[1]), xy=(0.5, 0.7), xycoords='axes fraction', bbox=dict(facecolor='white', edgecolor=None, alpha = 0)) ax2.set_xlabel('Annual degradation (%)'); ax1.plot(clearsky_daily.index, clearsky_daily/cs_yoy_info['renormalizing_factor'], 'o', color = 'orangered', alpha = 0.5) ax1.plot(cs_x, cs_y, 'k--', linewidth=3) ax1.set_xlabel('Date') ax1.set_ylabel('Renormalized Energy') ax1.set_ylim(0.5, 1.1) fig.autofmt_xdate() fig.suptitle('Clear-sky-based degradation results'); # repeat the plots from above fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10, 3)) ax2.hist(yoy_values, label='YOY', bins=len(yoy_values)//40) ax2.axvline(x=yoy_rd, color='black', linestyle='dashed', linewidth=3) ax2.set_xlim(-30,45) ax2.annotate( u' $R_{d}$ = %.2f%%/yr \n confidence interval: \n %.2f to %.2f %%/yr' %(yoy_rd, yoy_ci[0], yoy_ci[1]), xy=(0.5, 0.7), xycoords='axes fraction', bbox=dict(facecolor='white', edgecolor=None, alpha = 0)) ax2.set_xlabel('Annual degradation (%)'); ax1.plot(daily.index, daily/yoy_info['renormalizing_factor'], 'o', alpha = 0.5) ax1.plot(x, y, 'k--', linewidth=3) ax1.set_xlabel('Date') ax1.set_ylabel('Renormalized Energy') ax1.set_ylim(0.5, 1.1) fig.autofmt_xdate() fig.suptitle('Sensor-based degradation results'); print('The P95 exceedance level with the clear sky analysis is %.2f%%/yr' % cs_yoy_info['exceedance_level'])
docs/degradation_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Convolution Neural Network - Fashion MNIST # CNN allow us to extract the features of the image while maintaining the spatial arrangement of the image. # # Lets apply this to the Fashion MNIST Example import numpy as np import pandas as pd import keras import matplotlib.pyplot as plt % matplotlib inline import vis from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense, Dropout from keras import backend as K # ## Get Data from keras.datasets import fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() x_train.shape, y_train.shape, x_test.shape, y_test.shape labels = vis.fashion_mnist_label() # **Step 1: Prepare the images and labels** # # Reshape data for convolution network K.image_data_format() x_train_conv = x_train.reshape(x_train.shape[0], 28, 28, 1) x_test_conv = x_test.reshape(x_test.shape[0], 28, 28, 1) input_shape = (28, 28, 1) # Convert from 'uint8' to 'float32' and normalise the data to (0,1) x_train_conv = x_train_conv.astype("float32") / 255 x_test_conv = x_test_conv.astype("float32") / 255 # Convert class vectors to binary class matrices # convert class vectors to binary class matrices y_train_class = keras.utils.to_categorical(y_train, 10) y_test_class = keras.utils.to_categorical(y_test, 10) # ## Model 1: Simple Convolution # **Step 2: Craft the feature transfomation and classifier model ** model_simple_conv = Sequential() model_simple_conv.add(Conv2D(2, (3, 3), activation ="relu", input_shape=(28, 28, 1))) model_simple_conv.add(Flatten()) model_simple_conv.add(Dense(100, activation='relu')) model_simple_conv.add(Dense(10, activation='softmax')) model_simple_conv.summary() # **Step 3: Compile and fit the model** 
model_simple_conv.compile(loss='categorical_crossentropy', optimizer="sgd", metrics=['accuracy']) # %%time output_simple_conv = model_simple_conv.fit(x_train_conv, y_train_class, batch_size=512, epochs=5, verbose=2, validation_data=(x_test_conv, y_test_class)) # **Step 4: Check the performance of the model** vis.metrics(output_simple_conv.history) score = model_simple_conv.evaluate(x_test_conv, y_test_class, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # **Step 5: Make & Visualise the Prediction** predict_classes_conv = model_simple_conv.predict_classes(x_test_conv) pd.crosstab(y_test, predict_classes_conv) proba_conv = model_simple_conv.predict_proba(x_test_conv) i = 0 vis.imshow(x_test[i], labels[y_test[i]]) | vis.predict(proba_conv[i], y_test[i], labels) # ## Model 2: Convulation + Max Pooling # **Step 2: Craft the feature transfomation and classifier model ** model_pooling_conv = Sequential() model_pooling_conv.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28,28,1))) model_pooling_conv.add(MaxPooling2D(pool_size=(2, 2))) model_pooling_conv.add(Conv2D(64, (3, 3), activation='relu')) model_pooling_conv.add(MaxPooling2D(pool_size=(2, 2))) model_pooling_conv.add(Flatten()) model_pooling_conv.add(Dense(128, activation='relu')) model_pooling_conv.add(Dense(10, activation='softmax')) model_pooling_conv.summary() # **Step 3: Compile and fit the model** model_pooling_conv.compile(loss='categorical_crossentropy', optimizer="sgd", metrics=['accuracy']) # %%time output_pooling_conv = model_pooling_conv.fit(x_train_conv, y_train_class, batch_size=512, epochs=5, verbose=2, validation_data=(x_test_conv, y_test_class)) # **Step 4: Check the performance of the model** vis.metrics(output_pooling_conv.history) score = model_pooling_conv.evaluate(x_test_conv, y_test_class, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # **Step 5: Make & Visualise the Prediction** predict_classes_pooling = 
model_pooling_conv.predict_classes(x_test_conv)

# Confusion matrix: actual class vs. predicted class for the pooling model.
pd.crosstab(y_test, predict_classes_pooling)

# BUG FIX: this previously called predict_classes(), so `proba_pooling` held
# class indices instead of the per-class probability vectors its name implies.
proba_pooling = model_pooling_conv.predict_proba(x_test_conv)

i = 5
# BUG FIX: the visualisation previously reused `proba_conv` (Model 1's
# probabilities); show Model 2's own prediction instead.
vis.imshow(x_test[i], labels[y_test[i]]) | vis.predict(proba_pooling[i], y_test[i], labels)

# ## Model 3: Convolution + Max Pooling + Dropout

# **Step 2: Craft the feature transformation and classifier model**

# Same conv/pool stack as Model 2, with dropout after the second pooling
# layer and before the output layer to reduce overfitting.
model_dropout_conv = Sequential()
model_dropout_conv.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                              input_shape=(28, 28, 1)))
model_dropout_conv.add(MaxPooling2D(pool_size=(2, 2)))
model_dropout_conv.add(Conv2D(64, (3, 3), activation='relu'))
model_dropout_conv.add(MaxPooling2D(pool_size=(2, 2)))
model_dropout_conv.add(Dropout(0.25))
model_dropout_conv.add(Flatten())
model_dropout_conv.add(Dense(128, activation='relu'))
model_dropout_conv.add(Dropout(0.5))
model_dropout_conv.add(Dense(10, activation='softmax'))

model_dropout_conv.summary()

# **Step 3: Compile and fit the model**

model_dropout_conv.compile(loss='categorical_crossentropy',
                           optimizer="sgd",
                           metrics=['accuracy'])

# %%time
output_dropout_conv = model_dropout_conv.fit(x_train_conv, y_train_class,
                                             batch_size=512, epochs=5, verbose=1,
                                             validation_data=(x_test_conv, y_test_class))

# **Step 4: Check the performance of the model**

vis.metrics(output_dropout_conv.history)

score = model_dropout_conv.evaluate(x_test_conv, y_test_class, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# **Step 5: Make & Visualise the Prediction**

predict_classes_dropout = model_dropout_conv.predict_classes(x_test_conv)

pd.crosstab(y_test, predict_classes_dropout)

proba_dropout = model_dropout_conv.predict_proba(x_test_conv)

vis.imshow(x_test[i], labels[y_test[i]]) | vis.predict(proba_dropout[i], y_test[i], labels)
004-CNN-Fashion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true pycharm={"name": "#%% md\n"} # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Read-Data" data-toc-modified-id="Read-Data-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Read Data</a></span></li><li><span><a href="#Data-Visualisation" data-toc-modified-id="Data-Visualisation-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Data Visualisation</a></span></li><li><span><a href="#Pre-Process-Weather-Data-+-Visualization" data-toc-modified-id="Pre-Process-Weather-Data-+-Visualization-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Pre Process Weather Data + Visualization</a></span></li><li><span><a href="#Prepare-Data-for-a-simple-Linear-Regression" data-toc-modified-id="Prepare-Data-for-a-simple-Linear-Regression-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Prepare Data for a simple Linear Regression</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Merge-Power-Plant-and-Weather-Data" data-toc-modified-id="Merge-Power-Plant-and-Weather-Data-4.0.1"><span class="toc-item-num">4.0.1&nbsp;&nbsp;</span>Merge Power Plant and Weather Data</a></span></li><li><span><a href="#split-data" data-toc-modified-id="split-data-4.0.2"><span class="toc-item-num">4.0.2&nbsp;&nbsp;</span>split data</a></span></li><li><span><a href="#Create-Linear-Regression-Model" data-toc-modified-id="Create-Linear-Regression-Model-4.0.3"><span class="toc-item-num">4.0.3&nbsp;&nbsp;</span>Create Linear Regression Model</a></span></li></ul></li></ul></li><li><span><a href="#New-markdown-cell" data-toc-modified-id="New-markdown-cell-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>New markdown cell</a></span></li></ul></div> # + 
cell_id="f5770adc-b39d-4195-a959-ff8540a1b5b0"
# !python --version

# + cell_id="6a9f3678-4554-4e53-a2ae-18a1cbe2bf2e" tags=[]
# !pip install statsmodels

# + cell_id="ea3944c7-9d2a-4f85-a76f-f713ddaafdfd"
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import statsmodels
import statsmodels.api as sm

# + cell_id="9502f395-0ac1-4666-a18b-e15dfd160f9e" tags=[] pycharm={"name": "#%%\n"}
# Report the exact library versions used, for reproducibility.
print("Pandas==", pd.__version__, sep='')
print("Numpy==", np.__version__, sep='')
print("matplotlib==", matplotlib.__version__, sep='')
# BUG FIX: the two lines below previously printed np.__version__ for both
# sklearn and statsmodels, misreporting those libraries' versions.
print("sklearn==", sklearn.__version__, sep='')
print("statsmodels==", statsmodels.__version__, sep='')

# + [markdown] cell_id="c6af023e-06f0-46fd-9b73-b201a79250da" tags=[] pycharm={"name": "#%% md\n"}
# ## Read Data
# Since there were still some difficulties importing the preprocessor files, the
# respective functions are defined and executed directly in this notebook.
# + cell_id="95d3f321-1db2-4054-8d28-e4a8ac5f26ba" tags=[] # read data power plants data_power_plant_a = pd.read_csv("../data/data_power_plants/A.csv") data_power_plant_b = pd.read_csv("../data/data_power_plants/B.csv") data_power_plant_c = pd.read_csv("../data/data_power_plants/C.csv") data_weather = pd.read_csv("../data/data_weather/weather_aargau_2019.csv") # + cell_id="6c4081c9-091c-4843-a300-4d1d1d11f058" tags=[] def _format_columns(df): columns_lower = df.columns.str.lower() columns_clean = columns_lower.str.replace("-", "") df.columns = columns_clean return df def _set_datetime_index(df): """ create datetime index based on local_time, and resampled mean per hour""" df["timestamp"] = pd.to_datetime(df["timestamp"]) df.set_index(df["timestamp"], inplace=True) df = df.resample("h").mean() return df # + cell_id="5eb97411-f3d5-4db0-bc80-68bc72255064" tags=[] pycharm={"name": "#%%\n"} data_power_plant_a = _format_columns(data_power_plant_a) data_power_plant_b = _format_columns(data_power_plant_b) data_power_plant_c = _format_columns(data_power_plant_c) data_power_plant_a = _set_datetime_index(data_power_plant_a) data_power_plant_b = _set_datetime_index(data_power_plant_b) data_power_plant_c = _set_datetime_index(data_power_plant_c) # + [markdown] cell_id="eaab8557-307a-461f-a074-890f90ea0919" tags=[] pycharm={"name": "#%% md\n"} # ## Data Visualisation # Eine einfache Visualisierung der Daten für einen ersten Überblick. 
# --> Erkenntnisse: # - Power Plant C hat weniger Daten (Spalten) als die anderen zwei # - Alle Power Plans unterscheiden sich stark, was ihre Energieproduktion angeht # + cell_id="f614f88c-38c7-49a3-801d-805228e527a1" tags=[] data_power_plant_a # + cell_id="d367c964-5426-4527-8bb2-0237b527053c" tags=[] data_power_plant_b # + cell_id="bd134903-f47c-4a3b-9b60-f2cc74eaf266" tags=[] data_power_plant_c # + cell_id="a3030ca9-3358-4c39-8aa1-1de6b7a81630" tags=[] pycharm={"name": "#%%\n"} plt.plot(data_power_plant_a) plt.title("Power Plant A") plt.legend(data_power_plant_a.columns) plt.show() plt.plot(data_power_plant_b) plt.title("Power Plant B") plt.legend(data_power_plant_b.columns) plt.show() plt.plot(data_power_plant_c) plt.title("Power Plant C") plt.legend(data_power_plant_c.columns) plt.show() plt.hist(data_power_plant_a["grid_feedin_kw"] , bins = 30 , range = (0.0001, max(data_power_plant_a["grid_feedin_kw"])) ) plt.title("Power Plant A (>0)") plt.show() plt.hist(data_power_plant_b["grid_feedin_kw"] , bins = 30 , range = (0.0001, max(data_power_plant_b["grid_feedin_kw"])) ) plt.title("Power Plant B (>0)") plt.show() plt.hist(data_power_plant_c["grid_feedin_kw"] , bins = 30 , range = (0.0001, max(data_power_plant_c["grid_feedin_kw"])) ) plt.title("Power Plant C (>0)") plt.show() # + [markdown] cell_id="00773925-b97d-44a9-ac96-0550737dc7b1" tags=[] pycharm={"name": "#%% md\n"} # ## Pre Process Weather Data + Visualization # Findings: # - Wetterdaten fehlen für eine Stunde --> interpoliert # + cell_id="d47302c8-c2c6-4a88-a94f-acda593918ef" tags=[] def weather_format_columns(df): columns_lower = df.columns.str.lower() columns_clean = columns_lower.str.replace("-", "") df.columns = columns_clean return df def weather_set_datetime_index(df): """ create datetime index based on local_time, and resampled mean per hour""" df["timestamp"] = pd.to_datetime(df["local_time"]) df.set_index(df["timestamp"], inplace=True) df = df.resample("h").mean() return df # + 
cell_id="c58330d2-a4c9-4e05-950e-e0c8ab5c99fd" tags=[] data_weather = weather_format_columns(data_weather) data_weather = weather_set_datetime_index(data_weather) # + cell_id="116fd101-ef03-47e5-94c3-856ab85f0b5a" tags=[] pycharm={"name": "#%%\n"} data_weather # + [markdown] cell_id="86ce26b3-15bf-47cc-95a9-cc47e0760638" tags=[] pycharm={"name": "#%% md\n"} # Find Rows with missing Data # + cell_id="314614c0-89c6-48f9-b0fd-b7cd91d4cd5e" tags=[] pycharm={"name": "#%%\n"} data_weather[data_weather.isnull().any(axis=1)] # + [markdown] cell_id="30faf199-5a24-43af-8eb7-f4697efc974d" tags=[] pycharm={"name": "#%% md\n"} # Look at the data of this day (Raw output) --> Only temperature has a value, which would not be correct, if we simply # take the average of the time before and after the missing values # + cell_id="0c48eef5-a4c7-46ac-bc8f-a59d7f47ff8b" tags=[] pycharm={"name": "#%%\n"} data_weather.loc['2019-03-31'] # + [markdown] cell_id="fdb00f6d-b4e5-4542-9609-bfaa226cc3ff" tags=[] pycharm={"name": "#%% md\n"} # Interpolate missing data # + cell_id="767b23ed-b620-4291-be7d-4cca266ca26d" tags=[] data_weather_clean = data_weather.interpolate() # + pycharm={"name": "#%%\n"} data_weather_clean = data_weather.interpolate() plt.plot(data_weather_clean["temperature"]) plt.title("temperature") plt.show() plt.plot(data_weather_clean["precipitation"]) plt.title("precipitation") plt.show() plt.plot(data_weather_clean["snowfall"]) plt.title("snowfall") plt.show() plt.plot(data_weather_clean["snow_mass"]) plt.title("snow_mass") plt.show() plt.plot(data_weather_clean["air_density"]) plt.title("air_density") plt.show() plt.plot(data_weather_clean["radiation_surface"]) plt.title("radiation_surface") plt.show() plt.plot(data_weather_clean["radiation_toa"]) plt.title("radiation_toa") plt.show() plt.plot(data_weather_clean["cloud_cover"]) plt.title("cloud_cover") plt.show() # + cell_id="533a3369-5ce5-430a-84d7-9b4b34be5d67" tags=[] data_weather_enhanced = data_weather_clean.copy() 
data_weather_enhanced['month'] = data_weather_enhanced.index.month data_weather_enhanced['day'] = data_weather_enhanced.index.day data_weather_enhanced['hour'] = data_weather_enhanced.index.hour # + cell_id="a4319529-35dd-4975-adee-ab5fca9da9af" tags=[] data_weather_enhanced = data_weather_clean.copy() data_weather_enhanced['month'] = data_weather_enhanced.index.month data_weather_enhanced['day'] = data_weather_enhanced.index.day data_weather_enhanced['hour'] = data_weather_enhanced.index.hour # + [markdown] cell_id="d535897c-7c8d-4978-a888-d540f45afe46" tags=[] # ## Prepare Data for a simple Linear Regression # To Do: # - Daten der Power Plants mit den Wetterdaten verbinden # - Daten pro Power Plant in Test und Validation Set splitten # - Simples Regressionsmodell erstellen # - Regressionsmodell testen # - Sobald das Regressionsmodell genügt -> Output predicten und neues DF generieren (actual output, Works properly) # + [markdown] cell_id="7a1f2254-5241-4bc7-8621-41af38a472cf" tags=[] # #### Merge Power Plant and Weather Data # + cell_id="21c80d09-a41b-4712-9b6a-a7b677dc6277" tags=[] # Inner Join weil es keine Wetterdaten für 1.1.2019 00:00 Uhr gibt df_a = data_power_plant_a.copy().join(data_weather_enhanced, how='inner') df_b = data_power_plant_b.copy().join(data_weather_enhanced, how='inner') df_c = data_power_plant_c.copy().join(data_weather_enhanced, how='inner') # + [markdown] cell_id="6badebdc-0340-49e3-8da0-77e57b230124" tags=[] # #### split data # + cell_id="f6d76dec-9141-4bc5-8c82-de4f4017e4e7" tags=[] df_a_train, df_a_test = train_test_split(df_a, test_size=0.2, random_state=123) df_b_train, df_b_test = train_test_split(df_b, test_size=0.2, random_state=123) df_c_train, df_c_test = train_test_split(df_c, test_size=0.2, random_state=123) # + cell_id="fa763cd0-adb4-4f9b-9ccd-335a6b022b6e" tags=[] print(len(df_a)) print(len(df_a_train)) print(len(df_a_test)) print('-'*20) print(len(df_b)) print(len(df_b_train)) print(len(df_b_test)) print('-'*20) 
print(len(df_c)) print(len(df_c_train)) print(len(df_c_test)) # + [markdown] cell_id="25f7e215-ebdd-40c9-b34f-b066c79f47b4" tags=[] # #### Create Linear Regression Model # + [markdown] cell_id="6738b1a5-dcfa-46aa-a93b-98f001219864" tags=[] # Create a simple Regression Model with all possible input data # + cell_id="17dbfa61-0d5a-477d-a596-d059f96ab8b3" tags=[] X_train = df_a_train[["temperature", "precipitation", "snowfall", "snow_mass", "air_density", "radiation_surface", "radiation_toa", "cloud_cover", "month", "day", "hour"]] y_train = df_a_train["grid_feedin_kw"] X_test = df_a_test[["temperature", "precipitation", "snowfall", "snow_mass", "air_density", "radiation_surface", "radiation_toa", "cloud_cover", "month", "day", "hour"]] y_test = df_a_test["grid_feedin_kw"] regressor_OLS=sm.OLS(endog = y_train, exog = X_train).fit() regressor_OLS.summary() # + [markdown] cell_id="fc57b0e6-c90e-4586-801e-d2909487a008" tags=[] # # + cell_id="1800ee7c-9a66-47a3-944b-27c05087df42" tags=[] print("The model degree of freedom: ",regressor_OLS.df_model) print("The residual degree of freedom: ", sum(regressor_OLS.resid)/len(regressor_OLS.resid)) print("-"*20, "\n") y_predict_ols = regressor_OLS.predict(X_test) print("Average predicted grid_feedin_kw: ", sum(y_predict_ols)/ len(y_predict_ols)) print("Average actual grid_feedin_kw: ", sum(y_test)/ len(y_test)) print("Difference in prediction: ", (sum(y_predict_ols)/ len(y_predict_ols)) / (sum(y_test)/ len(y_test))) # + [markdown] cell_id="93fdfc4b-6a5a-4f13-9214-38e3a8858af4" tags=[] # Make a Prediction for the whole dataset and create a new pandas dataframe with only the acutal value and the predicted value # + cell_id="760a0a73-5034-451e-8bfa-4142c1e0816d" tags=[] df_predict_actual = pd.DataFrame(df_a.copy()["grid_feedin_kw"]) df_predict_actual["prediction"] = regressor_OLS.predict(df_a[["temperature", "precipitation", "snowfall", "snow_mass", "air_density", "radiation_surface", "radiation_toa", "cloud_cover", "month", "day", 
"hour"]]) # + [markdown] cell_id="2a1e34e7-72c0-47ec-931e-aa65712170a0" tags=[] # Make all prediction below 0 to 0 (as there can't be any value below 0) # + cell_id="621da8a1-382b-4644-89e2-97d0408abb31" tags=[] df_predict_actual["prediction"] = df_predict_actual["prediction"].apply(lambda x: 0 if x <= 0 else x) # + [markdown] cell_id="cc0e73bd-9a0e-4ae0-970d-2fa4de6d43a2" tags=[] # Plot the data # + cell_id="25d3e8d5-3502-4bfc-99b7-c4679fe9edac" tags=[] plt.plot(df_predict_actual["grid_feedin_kw"]) plt.title("Actual produced energy") plt.show() plt.plot(df_predict_actual["prediction"]) plt.title("Prediction energy production") plt.show() plt.plot(df_predict_actual) plt.legend(df_predict_actual.columns) plt.title("Actual vs. Produced") plt.show() # + cell_id="02ec534d-bfcc-46c2-a0d3-e456f9d4c867" tags=[] plt.plot( df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"]) plt.title("Absolute distance)") plt.show() plt.hist(df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"]) plt.title("Absolute distance)") plt.show() # relativer Unterschied (lambda, weil division durch 0 nicht geht) plt.plot( df_predict_actual["prediction"].apply(lambda x: x+1) / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1) ) plt.title("Relative distance") plt.show() plt.hist( df_predict_actual["prediction"].apply(lambda x: x+1) / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1) ) plt.title("Relative distance") plt.show() plt.hist( (df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"]).abs() / df_predict_actual["grid_feedin_kw"].apply(lambda x: x+1) , bins = 27 ) plt.title("Absolute Difference relativly to the actual produced power") plt.show() plt.hist( (df_predict_actual["grid_feedin_kw"]- df_predict_actual["prediction"]) / df_predict_actual["prediction"].apply(lambda x: x+1) ) plt.title("Relative distance (relative - actual)") plt.show() plt.hist( df_predict_actual["prediction"].loc[df_predict_actual["grid_feedin_kw"] == 0] ) 
plt.title("How much energy should be produced, when it actualy produces none") plt.show() plt.hist( df_predict_actual["grid_feedin_kw"].loc[df_predict_actual["prediction"] == 0] ) plt.title("How much energy is produced, when it actualy is predicted to be none") plt.show() # + [markdown] cell_id="437535df-0410-4ffe-933c-b3e8f17ed8b8" tags=[] # Bei den obrigen Grafiken ist ersichtlich, dass es schwierig ist ein Verhältnis zu finden, bei dem der produzierte Wert stark vom prognostizierten abweicht # und so ein Fehler vorliegen muss. Aber es zeigt sich, dass es vorkommt, dass die PV nichts produziert, obwohl es laut prediction etwas produzieren sollte. # Umgekehrt wird nichts produziert, wenn die Prognose auch sagt, dass nichts produziert werden sollte. # # -> Daher werden die Werte als not working gelabeled, bei denen nichts produziert wird, obwohl das MOdell sagt, dass etwas produziert werden sollte. # + [markdown] cell_id="ca87976d-1463-4ff9-8a83-27dc32516857" tags=[] # ## New markdown cell # + cell_id="c0cb2242-9756-4f0f-b414-96392b41275d" tags=[] #df_predict_actual.drop('label_machine_working', axis=1, inplace=True) df_predict_actual['label_machine_working'] = df_predict_actual["prediction"].loc[df_predict_actual["grid_feedin_kw"] == 0].apply(lambda x: x <= 1) df_predict_actual['label_machine_working'].fillna(value=True, inplace=True) # + cell_id="e8f32f77-02f9-4008-818f-c2f0864179f0" tags=[] df_predict_actual # + cell_id="96b88977-ea29-409b-bcc9-2b81e11b88f1" tags=[] print("True values: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == True])) print("False values: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == False])) print("Ratio: ", len(df_predict_actual.loc[df_predict_actual['label_machine_working'] == False]) / len(df_predict_actual)) # + cell_id="5330fd59-6bbb-40ee-8da3-193d0a663dfb" tags=[] plt.plot(df_predict_actual["grid_feedin_kw"] , label=df_predict_actual["label_machine_working"] ) # + 
cell_id="4d852116-ba31-4cb6-a73c-36d7ac55baee" tags=[]
analysis/classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 用通用强化学习算法自我对弈,掌握国际象棋和将棋 # [`程世东`](http://zhihu.com/people/cheng-shi-dong-47) 翻译 # # [`GitHub`](http://github.com/chengstone) [`Mail`](mailto:<EMAIL>) # # [English paper](https://arxiv.org/pdf/1712.01815.pdf) # 国际象棋是人工智能史上研究最为广泛的领域。最强大的象棋程序是基于复杂的搜索技术、适应于特定领域、和过去几十年里人类专家手工提炼的评估函数的结合。相比之下,通过自我对弈进行“白板”强化学习,在围棋游戏中AlphaGo Zero取得了超越人类的成绩。在本文中,我们将这种方法推广到一个单一的AlphaZero算法中,从“白板”开始学习,可以在许多具有挑战性的领域具有超越人类的表现。从随机下棋开始,除了游戏规则之外没有给予任何领域知识,AlphaZero在24小时内实现了在国际象棋、将棋(日本象棋)和围棋上的超人类水平,并击败了每一个世界冠军程序。 # 对计算机象棋的研究和计算机科学本身一样历史悠久。巴贝奇,图灵,香农和冯诺依曼都设计过计算机硬件、算法和理论来分析和指导下棋。国际象棋随后成为一代人工智能研究人员的挑战性任务,最终以高性能的超越人类水平的计算机国际象棋程序的出现而告终。然而,这些系统高度适应与它们的特定领域,不投入大量的人力是不能推广到其他问题的。 # 人工智能的长期目标是创造出可以从最初规则中自我学习的程序。最近,AlphaGo Zero算法通过使用深度卷积神经网络来表达围棋知识,只通过自我对弈的强化学习来训练,在围棋中实现了超人的表现。在本文中,我们应用了一个类似但完全通用的算法,我们将该算法[arXiv:1712.01815v1 [cs.AI] 5 Dec 2017]称之为AlphaZero,用来像围棋一样下国际象棋和将棋,除了游戏的规则外没有给予任何额外的领域知识,这个算法表明通用强化学习算法可以实现以“白板”方式学习,在多个具有挑战性的领域获得超人的表现。 # 1997年,“深蓝”击败了国际象棋人类世界冠军,这是人工智能的一个里程碑。计算机国际象棋程序在那以后的二十多年继续稳步超越人类水平。这些程序使用人类专家的知识和精心调校的参数评估[`走子`](https://baike.baidu.com/item/%E8%B5%B0%E5%AD%90/97274)位置,结合高性能的alpha-beta搜索,使用大量启发式和领域特定的适应性来扩展巨大的搜索树。在[`方法`](#方法)一节我们描述这些增强方法,重点关注2016年顶级国际象棋引擎锦标赛(TCEC)世界冠军Stockfish,其他强大的国际象棋程序,包括深蓝,使用的是非常相似的架构。 # 在计算复杂性方面,相比国际象棋,将棋更难:它是在一个更大的棋盘上玩,任何被俘获的对手棋子都会改变方向,随后可能被放置在棋盘上的任何位置。最强大的将棋程序,如电脑将棋协会(CSA)的世界冠军Elmo,直到最近才击败人类冠军。这些程序使用与计算机国际象棋程序类似的算法,基于高度优化的alpha-beta搜索引擎,具有许多特定领域的适应性。 # 围棋非常适合AlphaGo中使用的神经网络架构,因为游戏规则是平移不变的(匹配卷积网络的权值共享结构),是根据棋盘上的走子点之间的相邻点的自由度来定义的(匹配卷积网络的局部结构),并且是旋转和反射对称的(允许数据增强和合成)。而且,动作空间很简单(一颗棋子可以放在任何可能的位置),游戏结果只有二元结果赢或输,这两者都有助于神经网络的训练。 # 
国际象棋和将棋不太适合AlphaGo的神经网络架构。这些规则是与位置有关的(例如,兵可以从第二横线前进两步,在第八横线上升变)和不对称的(例如,兵只能向前移动,在王翼和后翼的王车易位是不同的)。规则包括远程交互(例如皇后可以一次穿过整个棋盘,或者从棋盘的另一边将军)。国际象棋的行动空间包括棋盘上所有棋手棋子的所有符合规则的位置;将棋允许将被吃掉的棋子放回棋盘上。国际象棋和将棋都可能造成平局;事实上,人们认为国际象棋最佳的解决方案是平局。 # AlphaZero算法是AlphaGo Zero算法的更通用的版本。它用深度神经网络和白板强化学习算法,替代传统程序中使用的人工先验知识和特定领域增强。 # 为取代手工制作的评估函数和启发式移动排序,AlphaZero使用参数为θ的深度神经网络$(p,v)= f_θ (s)$。这个神经网络使用局面(棋盘状态)s作为输入,输出走子概率向量p,它包含每一个走子动作a的概率分量$p_a = Pr(a|s)$,同时输出一个标量值v(胜率)——从局面s估算预期结果$z,v ≈ Е[z|s]$。AlphaZero完全从自我对弈中学习这些走子概率和价值估计;然后将学到的知识指导其搜索。 # 为取代具有特定领域增强的alpha-beta搜索,AlphaZero使用通用的蒙特卡洛树搜索(MCTS)算法。每次搜索都包含一系列从根节点$s_{root}$到叶子节点遍历树的自我对弈模拟。每次模拟都是通过在每个状态s下,根据当前的神经网络$f_θ$,选择一个访问次数低、走子概率高和价值高的走子走法a(这些值是从状态s中选择的动作a的叶子节点状态上做平均)。搜索返回一个表示走子概率分布的向量π ,是在根节点状态下关于访问计数的概率分布(无论是按比例还是贪婪算法)。 # AlphaZero深度神经网络的参数θ,从随机初始化参数开始,通过自我对弈强化学习进行训练。通过MCTS($a_t$ ∼ $π_t$ )轮流为两个棋手选择走子进行下棋。在棋局结束时,根据游戏规则计算游戏结果z 作为结束位置$s_T$的评分:-1代表失败,0代表平局,+1代表胜利。更新神经网络参数θ以使预测结果$v_t$与游戏结果z之间的误差最小,并且使策略向量$p_t$与搜索概率$π_t$的相似度最大。具体而言,参数θ通过在均方误差和交叉熵损失之和上的损失函数l上做梯度下降进行调整, # $(p,v)= f_θ (s), l = (z - v)^2- π^T log p + c||θ||^2$ # 其中c是控制L2正则化水平的参数。更新的参数被用于随后的自我对弈中。 # # Where c is the parameter that controls the L2 regularization level. The updated parameters are used in the subsequent self-play. 
# 本文描述的AlphaZero算法在几个方面与原始的AlphaGo Zero算法不同。 # AlphaGo Zero在假设只有赢或输二元结果的情况下,对获胜概率进行估计和优化。AlphaZero会考虑平局或潜在的其他结果,对预期的结果进行估算和优化。 # 围棋的规则是旋转和反转不变的。对此,在AlphaGo和AlphaGo Zero中有两种使用方式。首先,训练数据通过为每个局面生成8个对称图像来增强。其次,MCTS期间,棋盘位置在被神经网络评估前,会使用随机选择的旋转或反转变换进行转换,以便蒙特卡洛评估在不同的偏差上进行平均。国际象棋和将棋的规则是不对称的。AlphaZero不会增强训练数据,也不会在MCTS期间转换棋盘位置。 # 在AlphaGo Zero中,自我对弈是由以前所有迭代中最好的玩家生成的。每次训练迭代之后,与最好玩家对弈测量新玩家的能力;如果以55%的优势获胜,那么它将取代最好的玩家,而自我对弈将由这个新玩家产生。相反,AlphaZero只维护一个不断更新的单个神经网络,而不是等待迭代完成。自我对弈是通过使用这个神经网络的最新参数生成的,省略了评估步骤和选择最佳玩家的过程。 # AlphaGo Zero通过贝叶斯优化调整搜索的超参数。在AlphaZero中,我们为所有棋局重复使用相同的超参数,而无需进行特定于某种游戏的调整。唯一的例外是为保证探索而添加到先验策略中的噪声;这与棋局类型的典型合法走子的数量成比例。 # 像AlphaGo Zero一样,棋盘状态仅由基于每个游戏的基本规则的空间平面编码。下棋的行动是由空间平面或平面向量编码的,而且仅仅基于每种游戏的基本规则(参见[`方法`](#方法))。 # 我们将AlphaZero算法应用于国际象棋,将棋,还有围棋。除非另有说明,所有三个游戏都使用相同的算法,网络架构和超参数。我们为每一种棋单独训练了一个AlphaZero。从随机初始化的参数开始,使用5,000个第一代TPU生成自我对弈数据和64个第二代TPU来训练神经网络,训练进行了700,000步(mini-batches 大小是4096)。 [`方法`](#方法)中提供了训练步骤的更多细节。 # 图1显示了AlphaZero在自我对弈强化学习期间的表现。在国际象棋中,AlphaZero仅仅用了4小时(300k步)就胜过了Stockfish;在将棋中,AlphaZero在不到2小时(110K步)就胜过了Elmo;而在围棋中,AlphaZero 8小时(165k步)就胜过了AlphaGo Lee。 # ![b1](./assets/b1.png) # 图1:训练AlphaZero 70万步。国际等级分是在不同的玩家之间的比赛进行评估计算出来的,每一步棋有1秒的思考时间。a国际象棋中AlphaZero的表现,与2016年TCEC世界冠军程序Stockfish比较。b在将棋中AlphaZero的表现,与2017年CSA世界冠军程序Elmo比较。c 在围棋中AlphaZero的表现,与AlphaGo Lee和AlphaGo Zero(20 block / 3天)比较。 # 我们使用所有训练好的AlphaZero,分别在国际象棋、将棋和围棋中与Stockfish, Elmo和上一个版本的AlphaGo Zero(训练3天)进行了100场比赛,时间控制在每步棋1分钟。AlphaZero和之前的AlphaGo Zero使用一台带有4个TPU的机器。Elmo和Stockfish使用他们最强的版本,使用64个线程和1GB hash。AlphaZero击败了所有的对手,对Stockfish 零封对手,对 Elmo输了8局(见几个棋局的补充材料),以及击败以前版本的AlphaGo Zero(见表1)。 # # ![b2](./assets/b2.png) # 表1: 在国际象棋,将棋和围棋中评估AlphaZero,以AlphaZero的角度的胜平负,与Stockfish, Elmo,和训练了三天的AlphaGo Zero进行100场比赛。每个程序下一步棋有1分钟的思考时间。 # 
我们还分析了AlphaZero的MCTS搜索的表现,与Stockfish和Elmo使用的alpha-beta搜索引擎进行比较。AlphaZero在国际象棋中每秒只搜索8万个局面(positions),在将棋中搜索4万个,相比之下,Stockfish要搜索7000万个,Elmo搜索3500万个。AlphaZero通过使用其深层神经网络更有选择性地关注最有希望的[`变着`](https://baike.baidu.com/item/%E5%9B%BD%E9%99%85%E8%B1%A1%E6%A3%8B%E6%9C%AF%E8%AF%AD/7549734?fr=aladdin),补偿较低数量的评估- 可以说是像Shannon最初提出的那样,是一种更“人性化”的搜索方法。图2显示了每个玩家的思考时间,以国际等级分衡量,相对于Stockfish或者Elmo,思考时间为40ms。AlphaZero的MCTS的思考时间比Stockfish或Elmo更有效,这使得人们对普遍持有的观点认为alpha-beta搜索在这些领域本质上是优越的产生了质疑。 # 最后,我们分析了AlphaZero发现的国际象棋知识。表2分析了最常见的人类开局(在人类国际象棋游戏的在线数据库中出现过超过10万次)。在自我对弈训练期间,AlphaZero独立地发现和使用了这些开局。从每个人类的开局开始,AlphaZero击败了Stockfish,表明它确实掌握了广泛的国际象棋玩法。 # 国际象棋代表了过去几十年人工智能研究的巅峰。最先进的象棋程序基于强大的引擎,搜索数以百万计的局面,利用领域的专业知识和复杂的领域适应性。AlphaZero是一个通用的强化学习算法 - 最初为围棋而设计 - 在几个小时内取得了优异的成绩,搜索次数减少了1000倍,除了国际象棋规则之外不需要任何领域知识。此外,同样的算法不经修改也适用于更具挑战性的将棋游戏,在几小时内再次超越了当前最先进的水平。 # ![b3](./assets/b3.png) # ![b4](./assets/b4.png) # 表2:分析12个最受欢迎的人类开局(在线数据库中出现超过10万次)。每个开局标有其ECO代码和通用名称。该图显示了AlphaZero在自我训练比赛时使用的每次开局的比例。我们还从AlphaZero的角度报告了从每个开局开始与Stockfish 100场比赛的胜负/平局/失败结果,无论是白色(W)还是黑色(B)。最后,从每个开局提供AlphaZero的主要变着(PV)。 # # Table 2: Analysis of the 12 most popular human openings (more than 100,000 occurrences in online databases). Each opening is marked with its ECO code and common name. The graph shows the ratio of each opening used by AlphaZero in self-training competitions. We also reported the results of 100 matches with Stockfish from the perspective of AlphaZero, regardless of whether it was white (W) or black (B). Finally, the main change (PV) of AlphaZero is provided from each opening. 
# ![b5](./assets/b5.png) # 图2:关于AlphaZero思考时间的可扩展性,以国际等级分衡量。a在国际象棋中的AlphaZero和Stockfish的表现,描画每一步的思考时间。b在将棋中AlphaZero和Elmo的表现,描画每一步的思考时间。 # # 方法 # ## 计算机国际象棋程序剖析 # 在本节中,我们将描述一个典型的计算机国际象棋程序的组件,特别关注Stockfish,这是一个赢得2016年TCEC电脑国际象棋锦标赛的开源程序。 # 每个局面s由手工特征φ(s)的稀疏向量描述,包括特定中局/残局[`子力`](http://chessprogramming.wikispaces.com/material)(译者注:由每条线上棋子价值的总和确定的一个术语。所有的棋子和兵。[`子力优势`](https://baike.baidu.com/item/%E5%9B%BD%E9%99%85%E8%B1%A1%E6%A3%8B%E6%9C%AF%E8%AF%AD/7549734?fr=aladdin)是棋手在棋盘上有比对手更多的棋子或棋子的价值更大。)点的价值,[`子力不平衡表`](http://chessprogramming.wikispaces.com/Material+Tables)(译者注:例如 车vs两个[`轻子`](https://baike.baidu.com/item/%E8%BD%BB%E5%AD%90)(Minor pieces:象和马),皇后vs两个车或三个轻子,三个兵vs普通棋子),[`Piece-Square表`](http://chessprogramming.wikispaces.com/Piece-Square+Tables)(译者注:给特定位置上的特定棋子分配一个值),[`机动性`](http://chessprogramming.wikispaces.com/Mobility)(译者注:衡量一个玩家在一个给定的位置上合法移动的选择数量,[`棋子的行动自由`](https://baike.baidu.com/item/%E5%9B%BD%E9%99%85%E8%B1%A1%E6%A3%8B%E6%9C%AF%E8%AF%AD/7549734?fr=aladdin)。)和[`被困棋子`](http://chessprogramming.wikispaces.com/Trapped+Pieces) # # # (译者注:被困棋子是移动性差的极端例子),[`兵型`](http://chessprogramming.wikispaces.com/Pawn+Structure)(译者注:用来描述棋盘上所有兵的位置,忽略所有其他棋子。也指兵骨架。所有兵的位置的各个方面。),[`国王安全性`](http://chessprogramming.wikispaces.com/King+Safety),[`前哨`](http://chessprogramming.wikispaces.com/Outposts)(译者注:通常与马在棋盘中心或敌方一侧有关的国际象棋术语,被自己的棋子保护不再受对手棋子的攻击,或者在半开放线削弱对手的棋子,不再徒劳无功),[`双象`](https://en.wikipedia.org/wiki/Glossary_of_chess#Bishop_pair)(译者注:棋手是否有两个象),和其他复杂的评估 模型。通过手动和自动调整的组合,每个特征$φ_i$被分配相应的权重$w_i$,并且通过线性组合$v(s,w)=φ(s)^T w$来评估局面。然而,对于安全的位置,这个原始评估仅被认为是准确的,不包括未解决的[`吃子`](http://chessprogramming.wikispaces.com/Captures)和[`将军`](http://chessprogramming.wikispaces.com/Check)。在应用评估函数之前,使用领域专用的[`静止搜索`](http://chessprogramming.wikispaces.com/Quiescence+Search)来解决正在进行的战术局势。 # 局面s的最终评估是通过使用静止搜索评估每个叶子的极小极大搜索来计算的。alpha-beta剪枝被用来安全地剪切任何可能被另一个变着控制的分支。额外的剪切是使用愿望窗口和主要变着搜索实现的。其他剪枝策略包括无效走子修剪(假定走子以后结果比任何变着还要差),徒劳修剪(假设知道评估中可能的最大变着),和其他依赖于领域的修剪规则(假设知道被吃棋子的价值 # 
搜索的重点是关注有希望的变着,通过扩展有希望的变着的搜索深度,并通过基于历史,静态交换评估(SEE)和移动的棋子类型等启发式技术减少没有希望的变着的搜索深度。扩展是基于独立于领域的规则,这些规则用于识别单一的走子,没有合适的选择余地,以及依赖于领域的规则,比如扩展检查走子。减少(译者注:搜索深度),如后期走子减少,主要依赖于领域知识。 # alpha-beta搜索的效率主要取决于下棋走子的顺序。因此,走子是通过迭代加深来排序的(使用更浅的搜索命令移动以进行更深入的搜索)。此外,结合了与领域无关的启发式走子排序,如杀手启发式,历史启发式,相反走子启发式,以及基于捕获(SEE)和潜在捕获(MVV / LVA)的领域相关知识。 # [`换位表`] closed site(译者注:是存储先前执行的搜索的结果的数据库)便于重复使用在多个路径达到相同位置时的下棋顺序和值。经过仔细调整的开局库用于在棋局开始时选择走子。通过对残局位置的彻底逆向分析预先设计的残局库,在六个、有时七个或更少的所有位置提供最佳的走子。 # 其他强大的国际象棋程序,以及像“深蓝”这样的早期程序,都使用了非常类似的架构,包括上述的大部分组件,虽然重要的细节差别很大。 # AlphaZero不使用本节中描述的技术。这些技术中的一些可能会进一步提高AlphaZero的性能;然而,我们专注于纯粹的自我对弈强化学习方法,并将这些扩展留给未来研究。 # ## 计算机国际象棋和将棋上的早期工作 # 在本节中,我们将讨论一些关于计算机国际象棋在强化学习上的重要早期工作成果。 # NeuroChess通过使用175个手工输入特征的神经网络评估局面。它被训练通过时序差分学习预测最终的游戏结果,以及两步棋之后的预期特征。NeuroChess赢得了对GnuChess 13%的比赛,使用固定的深度2搜索。 # Beal和Smith应用时序差分学习来估计国际象棋和将棋中的棋子值,从随机值开始,单独通过自我对弈来学习。 # KnightCap通过一个神经网络来评估局面,这个神经网络使用了一个基于哪个区域受到哪些棋子攻击或防守的知识的攻击表。它是由时序差分学习的一种变体(称为TD(叶))进行训练的,它更新了alpha-beta搜索的主要变着的叶子值。KnightCap在训练之后与使用手动初始化棋子值权重的强大计算机对手对弈,达到了人类大师级别的能力。 # Meep通过基于手工特征的线性评估函数来评估局面。它是由另一个时序差分学习的变种(被称为TreeStrap)训练的,它更新了一个alpha-beta搜索的所有节点。Meep经过随机初始权重自我对弈训练之后,在15场比赛中13场击败了人类国际大师级棋手。 # Kaneko和Hoki通过学习在alpha-beta 搜索期间选择人类专家的走子,来训练包括一百万个特征的将棋评估函数的权重。他们还基于根据专家棋局日志调整的极小极大搜索进行了大规模优化;这是获得2013年世界计算机将棋冠军的Bonanza引擎的一部分。 # Giraffe通过一个神经网络评估局面,包括移动能力地图和描述每个方格(走子点)的攻击者和防御者的最低值的攻击和捍卫地图。它通过使用TD(叶)的自我对弈训练,也达到了与国际大师相当的水平。 # DeepChess训练了一个神经网络来执行成对的局面评估。它是通过监督学习从人类专家对弈数据库进行训练的,这些棋局是经过预先过滤的,以避免吃子棋和平局。DeepChess达到了一个强大的特级大师的水平。 # 所有这些程序都将他们学到的评估函数与各种扩展增强的alpha-beta搜索功能相结合。 # 基于使用像AlphaZero策略迭代的双重策略和价值网络的训练方法已经成功应用于改进Hex的最新技术。 # ## MCTS 和Alpha-Beta 搜索 # 至少四十年来,最强大的计算机国际象棋程序已经使用alpha-beta搜索。AlphaZero使用明显不同的方法来平均子树内的局面评估,而不是计算该子树的最小最大估计。但是,使用传统MCTS的国际象棋程序比Alpha-Beta搜索程序弱得多;而基于神经网络的alpha-beta程序以前不能与更快的手工评估函数对抗。 # AlphaZero局面评估使用基于深度神经网络的非线性函数逼近,而不是典型国际象棋程序中使用的线性函数逼近。这提供了更强大的表示,但是也可能引入虚假逼近误差。MCTS对这些近似误差进行平均,因此在评估大型子树时趋向于误差抵消。相比之下,alpha-beta搜索计算明确的最小最大值,它将最大的近似误差传播到子树的根节点。使用MCTS可以允许AlphaZero将其神经网络的表示与强大的、独立于领域的搜索有效地结合起来。 # ### 领域知识 Domain knowledge # - 
1.描述位置的输入特征和描述走子的输出特征被构造为一组平面;即神经网络结构与棋盘的网格结构相匹配。 # - 2.为AlphaZero提供了完善的游戏规则知识。这些在MCTS期间被用来模拟由一系列走子产生的位置,以确定游戏的结束,并对达到结束状态的任何模拟对弈进行评分。 # - 3.对规则的了解也被用来编码输入平面(即[`王车易位`](Castling),[`重复局面`](https://baike.baidu.com/item/%E5%9B%BD%E9%99%85%E8%B1%A1%E6%A3%8B%E6%9C%AF%E8%AF%AD/7549734?fr=aladdin),没有进展)和输出平面(棋子如何走子,升变和将棋中的[`取驹`](https://baike.baidu.com/item/%E5%B0%86%E6%A3%8B/491643)([`piece drops`](https://en.wikipedia.org/wiki/Shogi#Drops)))。 # - 4.合法走子的典型数量用于缩放探索噪音(见下文)。 # - 5.国际象棋和将棋比赛超过最大步数(由典型比赛长度决定)将被终止,并被判为平局;围棋比赛结束,使用Tromp-Taylor规则打分。 # 除了上面列出的要点,AlphaZero没有使用任何形式的领域知识。 # ### 表示 Expression # 在本节中,我们将描述棋盘输入的表示形式,以及AlphaZero中神经网络使用的走子动作输出的表示形式。其他的表示本来也可以使用; 在我们的实验中,训练算法对于许多合理的选择可以有效地工作。 # ![b6](./assets/b6.png) # 表S1:分别在围棋,国际象棋和将棋中AlphaZero使用的输入特征。第一组特征是8步历史走子记录的每个局面。计数由实数值表示;其他输入特征通过使用指定数量的二值输入平面的独热编码来表示。当前玩家由P1表示,对手由P2表示。 # 神经网络的输入是N × N ×(MT + L)图像栈,其表示状态使用大小为N×N的T组M个平面的级联组成。每一组平面代表时间步t-T + 1,...,t的棋盘位置,在小于1的时间步中设置为零。棋盘朝向当前玩家的角度。M特征平面由棋手存在的棋子的二值特征平面组成,每个棋子类型具有一个平面,第二组平面表示对手存在的棋子。对于将棋,还有额外的平面显示每种类型的持驹数。还有一个额外的L个常值输入平面,表示玩家的颜色,总的回合数量和特殊规则的状态:在国际象棋合法的王车易位(王翼或者后翼);局面的重复次数(3次重复在国际象棋中自动判为平局;在将棋中是4次);和在国际象棋中没有进展的走子次数(没有进展的50次走子自动判为平局)。表格S1中总结了输入特征。 # 下棋走子可以分为两部分:选择要移动的棋子,然后在棋子的合法棋步中进行选择。我们用一个8 × 8 × 73的平面栈来表示策略π(a|s),它编码了4,672个可能的走子的概率分布。每个8×8的位置标识从哪个方块“拾取”一个棋子。前56个平面编码对于任何棋子可能的“皇后走子”,沿着八个相对方向{北,东北,东,东南,南,西南,西,西北}中的一个,若干方块[1..7]中将被吃的棋子。接下来的8个平面编码可能的马的走子。最后的9个平面编码对于兵底线升变后在对角线上可能的走子和吃子,分别对于车,马或象。和其他兵从第七横线升变为为皇后的走子或吃子。 # 将棋中的策略由一个9 × 9 × 139的平面栈表示,类似地对11,259个可能的走子进行概率分布编码。前64个平面编码“皇后走子”,接下来的2个编码马走子。另有64 + 2个平面分别编码升变成皇后的走子和升变成马的走子。最后7个平面编码将一个被捕获的棋子放回棋盘上的位置。 # 围棋中的策略与AlphaGo Zero表示相同,使用一个包含19 × 19 + 1的走子平坦分布代表可能的放置和走子。我们在国际象棋和将棋上也尝试用关于走子的平坦分布,最后的结果几乎相同,尽管训练稍微慢了点。 # ![b7](./assets/b7.png) # 表S2:国际象棋和将棋中AlphaZero使用的动作表示。该策略是由一堆编码合法走子概率分布的平面表示的;平面对应于表中的条目。 # 表S2中总结了行动表示。通过将概率设置为零,并将使用的走子概率重新归一化,可以屏蔽非法走子。 # ### 配置 # 在训练期间,每个MCTS使用了800次模拟。棋局的数量,局面和思考时间由于不同的棋盘大小和游戏长度而有所不同,如表S3所示。 # 
每场比赛的学习率为0.2,在训练过程中分别下降了三次(分别为0.02,0.002和0.0002)。走子的选择与根节点的访问次数成正比。Dirichlet噪声Dir(α)被添加到根节点的先验概率中;这个比例与典型位置的合法走子的近似数量成反比,分别为国际象棋、将棋和围棋的α 取{0.3,0.15,0.03}。除非另有说明,否则训练和搜索算法和参数与AlphaGo Zero相同。 # ![b8](./assets/b8.png) # 表S3:国际象棋,将棋和围棋中AlphaZero训练的选择统计。 # 在评估期间,AlphaZero选择走子使用关于根节点访问次数的贪婪方法。每台MCTS在一台带有4个TPU的机器上执行。 # ### 评估 Evaluate # 为了评估国际象棋的性能,我们使用了Stockfish版本8(官方的Linux版本)作为基准程序,使用64个CPU线程和1GB Hash。 # 为了评估将棋中的性能,我们使用了Elmo版本WCSC27并结合了有64个CPU线程和1GB Hash,EnteringKingRule的usi选项设置为NoEnteringKing,YaneuraOu 2017早期的KPPT 4.73 64AVX2。我们通过测量每个选手的国际等级分来评估AlphaZero的相对强度(图1)。我们通过Logistic函数p(a defeats b) =$\frac{1}{1+exp (c_{elo} (e(b)-e(a)) }$估计玩家a击败玩家b的概率,并通过贝叶斯逻辑回归估计等级分e(·),用BayesElo程序使用标准常数$c_{elo}$ = 1/400计算。国际等级评分是根据与AlphaZero在训练迭代期间进行比赛1秒每次走子的结果计算得出的,同时Stockfish,Elmo或者AlphaGo Lee分别是基准选手。基准玩家的国际等级分是以公开可用的价值为基础的。 # 我们还测量了AlphaZero对每个基准玩家快棋的表现。设置被选择为符合计算机象棋比赛的条件:每个棋手允许1分钟下一步棋,所有棋手都可以投降认负(Stockfish和Elmo 10次连续走子-900 centipawns,AlphaZero 5%胜率)。所有玩家的思考都被禁止了。 # ![b9](./assets/b9.png) # 表S4:国际象棋、将棋和围棋中AlphaZero,Stockfish和Elmo的评估速度(局面/秒)
Mastering_Chess_and_Shogi_by_Self-Play_with_a_General_Reinforcement_Learning_Algorithm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rotation # + [markdown] tags=[] # *Modeling and Simulation in Python* # # Copyright 2021 <NAME> # # License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) # + tags=[] # download modsim.py if necessary from os.path import basename, exists def download(url): filename = basename(url) if not exists(filename): from urllib.request import urlretrieve local, _ = urlretrieve(url, filename) print('Downloaded ' + local) download('https://raw.githubusercontent.com/AllenDowney/' + 'ModSimPy/master/modsim.py') # + tags=[] # import functions from modsim from modsim import * # + [markdown] tags=[] # [Click here to run this chapter on Colab](https://colab.research.google.com/github/AllenDowney/ModSimPy/blob/master//chapters/chap24.ipynb) # - # In this chapter and the next we'll model systems that involve rotating objects. # In general, rotation is complicated. # In three dimensions, objects can rotate around three axes and many objects are easier to spin around some axes than others. # # If the configuration of an object changes over time, it might become # easier or harder to spin, which explains the surprising dynamics of # gymnasts, divers, ice skaters, etc. # # And when you apply a twisting force to a rotating object, the effect is often contrary to intuition. # For an example, see this video on gyroscopic precession: <http://modsimpy.com/precess>. # We will not take on the physics of rotation in all its glory; rather, we will focus on simple scenarios where all rotation and all twisting forces are around a single axis. 
# In that case, we can treat some vector quantities as if they were scalars, in the same way that we sometimes treat velocity as a scalar with an implicit direction. # # The fundamental ideas in these examples are angular velocity, angular acceleration, torque, and moment of inertia. # If you are not already familiar with these concepts, I will define them as we go along, and I will point to additional reading. # ## The Physics of Toilet Paper # # As an example of a system with rotation, we'll simulate the manufacture of a roll of toilet paper, as shown in this video: <https://youtu.be/Z74OfpUbeac?t=231>. # Starting with a cardboard tube at the center, we will roll up 47 m of paper, a typical length for a roll of toilet paper in the U.S. (see <http://modsimpy.com/paper>). # # The following figure shows a diagram of the system: $r$ represents # the radius of the roll at a point in time. Initially, $r$ is the radius of the cardboard core, $R_{min}$. When the roll is complete, $r$ is $R_{max}$. # # ![Diagram of a roll of toilet paper, showing change in paper length as a result of a small rotation, $d\theta$.](https://github.com/AllenDowney/ModSim/raw/main/figs/paper_roll.png) # # I'll use $\theta$ to represent the total rotation of the roll in radians. In the diagram, $d\theta$ represents a small increase in $\theta$, which corresponds to a distance along the circumference of $r~d\theta$. # I'll use $y$ to represent the total length of paper that's been rolled. # Initially, $\theta=0$ and $y=0$. # For each small increase in $\theta$, there is a corresponding increase in $y$: # # $$dy = r~d\theta$$ # # If we divide both sides by a small increase in time, $dt$, we get a # differential equation for $y$ as a function of time. # # $$\frac{dy}{dt} = r \frac{d\theta}{dt}$$ # # As we roll up the paper, $r$ increases. Assuming it increases by a fixed amount per revolution, we can write # # $$dr = k~d\theta$$ # # Where $k$ is an unknown constant we'll have to figure out. 
# Again, we can divide both sides by $dt$ to get a differential equation in time: # # $$\frac{dr}{dt} = k \frac{d\theta}{dt}$$ # # Finally, let's assume that $\theta$ increases at a constant rate of $\omega = 300$ rad/s (about 2900 revolutions per minute): # # $$\frac{d\theta}{dt} = \omega$$ # # This rate of change is called an *angular velocity*. Now we have a system of differential equations we can use to simulate the system. # ## Setting Parameters # # Here are the parameters of the system: Rmin = 0.02 # m Rmax = 0.055 # m L = 47 # m omega = 300 # rad / s # `Rmin` and `Rmax` are the initial and final values for the radius, `r`. # `L` is the total length of the paper. # `omega` is the angular velocity in radians per second. # # Figuring out `k` is not easy, but we can estimate it by pretending that `r` is constant and equal to the average of `Rmin` and `Rmax`: Ravg = (Rmax + Rmin) / 2 # In that case, the circumference of the roll is also be constant: Cavg = 2 * np.pi * Ravg # And we can compute the number of revolutions to roll up length `L`, like this. revs = L / Cavg # Converting rotations to radians, we can estimate the final value of `theta`. theta = 2 * np.pi * revs theta # Finally, `k` is the total change in `r` divided by the total change in `theta`. k_est = (Rmax - Rmin) / theta k_est # At the end of the chapter, we'll derive `k` analytically, but this estimate is enough to get started. # ## Simulating the System # # The state variables we'll use are, `theta`, `y`, and `r`. # Here are the initial conditions: init = State(theta=0, y=0, r=Rmin) # And here's a `System` object with `init` and `t_end`: system = System(init=init, t_end=10) # Now we can use the differential equations from the previous section to # write a slope function: def slope_func(t, state, system): theta, y, r = state dydt = r * omega drdt = k_est * omega return omega, dydt, drdt # As usual, the slope function takes a time stamp, a `State` object, and a `System` object. 
# # The job of the slope function is to compute the time derivatives of the state variables. # # The derivative of `theta` is angular velocity, `omega`. # The derivatives of `y` and `r` are given by the differential equations we derived. # And as usual, we'll test the slope function with the initial conditions. slope_func(0, system.init, system) # We'd like to stop the simulation when the length of paper on the roll is `L`. We can do that with an event function that passes through 0 when `y` equals `L`: def event_func(t, state, system): theta, y, r = state return L - y # We can test it with the initial conditions: event_func(0, system.init, system) # Now let's run the simulation: results, details = run_solve_ivp(system, slope_func, events=event_func) details.message # Here are the last few time steps. results.tail() # The time it takes to complete one roll is about 4.2 seconds, which is consistent with what we see in the video. results.index[-1] # The final value of `y` is 47 meters, as expected. final_state = results.iloc[-1] final_state.y # The final value of radius is 0.55 m, which is `Rmax`. final_state.r # The total number of rotations is close to 200, which seems plausible. radians = final_state.theta rotations = radians / 2 / np.pi rotations # As an exercise, we'll see how fast the paper is moving. But first, let's take a closer look at the results. # ## Plotting the Results # # Here's what `theta` looks like over time. # + def plot_theta(results): results.theta.plot(color='C0', label='theta') decorate(xlabel='Time (s)', ylabel='Angle (rad)') plot_theta(results) # - # `theta` grows linearly, as we should expect with constant angular velocity. # # Here's what `r` looks like over time. # + def plot_r(results): results.r.plot(color='C2', label='r') decorate(xlabel='Time (s)', ylabel='Radius (m)') plot_r(results) # - # `r` also increases linearly. # # But since the derivative of `y` depends on `r`, and `r` is increasing, `y` grows with increasing slope. 
# + def plot_y(results): results.y.plot(color='C1', label='y') decorate(xlabel='Time (s)', ylabel='Length (m)') plot_y(results) # - # In the next section, we'll see that we could have solved the # differential equations analytically. # However, it is often useful to start with simulation as a way of exploring and checking assumptions. # ## Analysis # # Since angular velocity is constant: # # $$\frac{d\theta}{dt} = \omega \quad\quad (1)$$ # # We can find $\theta$ as a function of time by integrating both sides: # # $$\theta(t) = \omega t$$ # # Similarly, we can solve this equation # # $$\frac{dr}{dt} = k \omega$$ # # to find # # $$r(t) = k \omega t + R_{min}$$ # # Then we can plug the solution for $r$ into the equation for $y$: # # $$\begin{aligned} # \frac{dy}{dt} & = r \omega \quad\quad (2) \\ # & = \left[ k \omega t + R_{min} \right] \omega \nonumber\end{aligned}$$ # # Integrating both sides yields: # # $$y(t) = \left[ k \omega t^2 / 2 + R_{min} t \right] \omega$$ # # So $y$ is a parabola, as you might have guessed. # We can also use these equations to find the relationship between $y$ and $r$, independent of time, which we can use to compute $k$. # Dividing Equations 1 and 2, yields # # $$\frac{dr}{dy} = \frac{k}{r}$$ # # Separating variables yields # # $$r~dr = k~dy$$ # # Integrating both sides yields # # $$r^2 / 2 = k y + C$$ # # Solving for $y$, we have # # $$y = \frac{1}{2k} (r^2 - C) \label{eqn3}$$ # # When $y=0$, $r=R_{min}$, so # # $$R_{min}^2 / 2 = C$$ # # When $y=L$, $r=R_{max}$, so # # $$L = \frac{1}{2k} (R_{max}^2 - R_{min}^2)$$ # # Solving for $k$ yields # # $$k = \frac{1}{2L} (R_{max}^2 - R_{min}^2) \label{eqn4}$$ # # Plugging in the values of the parameters yields `2.8e-5` m/rad, the same as the "estimate" we computed in Section xxx. k = (Rmax**2 - Rmin**2) / (2 * L) k # In this case the estimate turns out to be exact. # ## Summary # # This chapter introduces rotation, starting with an example where angular velocity is constant. 
# # We simulated the manufacture of a roll of toilet paper, then we solved the same problem analytically. # # In the next chapter, we'll see a more interesting example where angular velocity is not constant. And we'll introduce three new concepts: torque, angular acceleration, and moment of inertia. # # But first, you might want to work on the following exercise. # ## Exercises # # This chapter is available as a Jupyter notebook where you can read the text, run the code, and work on the exercises. # You can access the notebooks at <https://allendowney.github.io/ModSimPy/>. # ### Exercise 1 # # Since we keep `omega` constant, the linear velocity of the paper increases with radius. We can use `gradient` to estimate the derivative of `results.y`. dydt = gradient(results.y) dydt.plot(label='dydt') decorate(xlabel='Time (s)', ylabel='Linear velocity (m/s)') # With constant angular velocity, linear velocity is increasing, reaching its maximum at the end. max_linear_velocity = dydt.iloc[-1] max_linear_velocity # Now suppose this peak velocity is the limiting factor; that is, we can't move the paper any faster than that. # # In that case, we might be able to speed up the process by keeping the linear velocity at the maximum all the time. # # Write a slope function that keeps the linear velocity, `dydt`, constant, and computes the angular velocity, `omega`, accordingly. # # Run the simulation and see how much faster we could finish rolling the paper. # + # Solution goes here # + # Solution goes here # + # Solution goes here # + # Solution goes here # + # Solution goes here # + # Solution goes here # + # Solution goes here # -
chapters/chap24.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # [ATM 623: Climate Modeling](../index.ipynb) # [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany # # Lecture 7: Grey radiation modeling with climlab # ### About these notes: # # This document uses the interactive [`IPython notebook`](http://ipython.org/notebook.html) format (now also called [`Jupyter`](https://jupyter.org)). The notes can be accessed in several different ways: # # - The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware # - The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb) # - A complete snapshot of the notes as of May 2015 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html). # # Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab # ## Contents # # 1. [Introducing `climlab`](#section1) # 2. [Using `climlab` to implement the two-layer leaky greenhouse model](#section2) # 3. [The observed annual, global mean temperature profile](#section3) # 4. [A 30-layer model using the observed temperatures](#section4) # 5. [Radiative forcing in the 30-layer model](#section5) # 6. [Radiative equilibrium in the 30-layer model](#section6) # 7. [Radiative-Convective Equilibrium in the 30-layer model](#section7) # 8. [Putting stratospheric ozone in the grey-gas model](#section8) # ____________ # <a id='section1'></a> # # ## 1. Introducing `climlab` # ____________ # # ``climlab`` is a flexible engine for process-oriented climate modeling. 
# It is based on a very general concept of a model as a collection of individual, # interacting processes. ``climlab`` defines a base class called ``Process``, which # can contain an arbitrarily complex tree of sub-processes (each also some # sub-class of ``Process``). Every climate process (radiative, dynamical, # physical, turbulent, convective, chemical, etc.) can be simulated as a stand-alone # process model given appropriate input, or as a sub-process of a more complex model. # New classes of model can easily be defined and run interactively by putting together an # appropriate collection of sub-processes. # # ``climlab`` is a work-in-progress, and the code base will evolve substantially over the course of this semester. # The latest code can always be found on ``github``: # # https://github.com/brian-rose/climlab # # You are strongly encouraged to clone the ``climlab`` repository and use ``git`` to keep your local copy up-to-date. # # Running this notebook requires that ``climlab`` is already installed on your system. # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import netCDF4 as nc import climlab # ____________ # <a id='section2'></a> # # ## 2. Using `climlab` to implement the two-layer leaky greenhouse model # ____________ # # One of the things that ``climlab`` is set up to do is the grey-radiation modeling we have already been discussing. # # Since we already derived a [complete analytical solution to the two-layer leaky greenhouse model](Lecture06 -- Elementary greenhouse models.ipynb), we will use this to validate the `climlab` code. # # # ### Validation # # We want to verify that the model reproduces the observed OLR given observed temperatures, and the absorptivity that we tuned in the analytical model. 
The target numbers are: # # \begin{align} # T_s &= 288 \text{ K} \\ # T_0 &= 275 \text{ K} \\ # T_1 &= 230 \text{ K} \\ # \end{align} # # $$ \epsilon = 0.58377 $$ # # $$ OLR = 239 \text{ W m}^{-2} $$ # # ### Initialize a model in `climlab` # The first thing we do is create a new model. # # The following example code is sparsely commented but will hopefully orient you on the basics of defining and working with a `climlab Process` object. # Test in a 2-layer atmosphere col = climlab.GreyRadiationModel(num_lev=2) print col col.subprocess col.state col.Ts col.Ts[:] = 288. col.Tatm[:] = np.array([275., 230.]) col.state LW = col.subprocess['LW'] print LW LW.absorptivity LW.absorptivity = 0.58377 LW.absorptivity # This does all the calculations that would be performed at each time step, # but doesn't actually update the temperatures col.compute_diagnostics() col.diagnostics # Check OLR against our analytical solution col.diagnostics['OLR'] col.state # perform a single time step col.step_forward() col.state # integrate out to radiative equilibrium col.integrate_years(2.) # Check for equilibrium col.diagnostics['ASR'] - col.diagnostics['OLR'] # Compare these temperatures against our analytical solutions for radiative equilibrium col.state # So it looks like `climlab` agrees with our analytical results. That's good. # ____________ # <a id='section3'></a> # # ## 3. The observed annual, global mean temperature profile # ____________ # # We want to model the OLR in a column whose temperatures match observations. As we've done before, we'll calculate the global, annual mean air temperature from the NCEP Reanalysis data. # This will try to read the data over the internet. 
# If you have a local copy of the data, just use the local path to the .nc file instead of the URL ncep_url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/" ncep_air = nc.Dataset( ncep_url + "pressure/air.mon.1981-2010.ltm.nc" ) level = ncep_air.variables['level'][:] lat = ncep_air.variables['lat'][:] # A log-pressure height coordinate zstar = -np.log(level/1000) Tzon = np.mean(ncep_air.variables['air'][:],axis=(0,3)) Tglobal = np.average( Tzon , weights=np.cos(np.deg2rad(lat)), axis=1) + climlab.constants.tempCtoK # Note the useful conversion factor. climlab.constants has lots of commonly used constant pre-defined # Here we are plotting with respect to log(pressure) but labeling the axis in pressure units fig = plt.figure( figsize=(8,6) ) ax = fig.add_subplot(111) ax.plot( Tglobal , zstar ) yticks = np.array([1000., 750., 500., 250., 100., 50., 20., 10.]) ax.set_yticks(-np.log(yticks/1000.)) ax.set_yticklabels(yticks) ax.set_xlabel('Temperature (K)', fontsize=16) ax.set_ylabel('Pressure (hPa)', fontsize=16 ) ax.set_title('Global, annual mean sounding from NCEP Reanalysis', fontsize = 24) ax.grid() # ____________ # <a id='section4'></a> # # ## 4. 
A 30-layer model using the observed temperatures # ____________ # # # initialize a grey radiation model with 30 levels col = climlab.GreyRadiationModel() print col # interpolate to 30 evenly spaced pressure levels lev = col.lev Tinterp = np.flipud(np.interp(np.flipud(lev), np.flipud(level), np.flipud(Tglobal))) Tinterp # Need to 'flipud' because the interpolation routine needs the pressure data to be in increasing order # Initialize model with observed temperatures col.Ts[:] = Tglobal[0] col.Tatm[:] = Tinterp # A handy re-usable routine for making a plot of the temperature profiles # We will plot temperatures with respect to log(pressure) to get a height-like coordinate def plot_sounding(collist): color_cycle=['r', 'g', 'b', 'y'] # col is either a column model object or a list of column model objects if isinstance(collist, climlab.Process): # make a list with a single item collist = [collist] fig = plt.figure() ax = fig.add_subplot(111) for i, col in enumerate(collist): zstar = -np.log(col.lev/climlab.constants.ps) ax.plot(col.Tatm, zstar, color=color_cycle[i]) ax.plot(col.Ts, 0, 'o', markersize=12, color=color_cycle[i]) #ax.invert_yaxis() yticks = np.array([1000., 750., 500., 250., 100., 50., 20., 10.]) ax.set_yticks(-np.log(yticks/1000.)) ax.set_yticklabels(yticks) ax.set_xlabel('Temperature (K)') ax.set_ylabel('Pressure (hPa)') ax.grid() return ax # This should look just like the observations plot_sounding(col) # ### Tune absorptivity to get observed OLR col.compute_diagnostics() col.diagnostics['OLR'] # Need to tune absorptivity to get OLR = 239 epsarray = np.linspace(0.01, 0.1, 100) OLRarray = np.zeros_like(epsarray) # + for i in range(epsarray.size): col.subprocess['LW'].absorptivity = epsarray[i] col.compute_diagnostics() OLRarray[i] = col.diagnostics['OLR'] plt.plot(epsarray, OLRarray) plt.grid() # - # The necessary value seems to lie near 0.055 or so. # # We can be more precise with a numerical root-finder. 
def OLRanom(eps): col.subprocess['LW'].absorptivity = eps col.compute_diagnostics() return col.diagnostics['OLR'] - 239. # Use numerical root-finding to get the equilibria from scipy.optimize import brentq # brentq is a root-finding function # Need to give it a function and two end-points # It will look for a zero of the function between those end-points eps = brentq(OLRanom, 0.01, 0.1) print eps col.subprocess['LW'].absorptivity = eps col.subprocess['LW'].absorptivity col.compute_diagnostics() col.diagnostics['OLR'] # ____________ # <a id='section5'></a> # # ## 5. Radiative forcing in the 30-layer model # ____________ # # Let's compute radiative forcing for a **2% increase in absorptivity**. # clone our model using a built-in climlab function col2 = climlab.process_like(col) print col2 col2.subprocess['LW'].absorptivity *= 1.02 col2.subprocess['LW'].absorptivity # Radiative forcing by definition is the change in TOA radiative flux, HOLDING THE TEMPERATURES FIXED. col2.Ts - col.Ts col2.Tatm - col.Tatm col2.compute_diagnostics() col2.diagnostics['OLR'] # The OLR decreased after we added the extra absorbers, as we expect. Now we can calculate the Radiative Forcing: RF = -(col2.diagnostics['OLR'] - col.diagnostics['OLR']) print 'The radiative forcing is %f W/m2.' %RF # ____________ # <a id='section6'></a> # # ## 6. Radiative equilibrium in the 30-layer model # ____________ # re = climlab.process_like(col) # To get to equilibrium, we just time-step the model forward long enough re.integrate_years(2.) # Check for energy balance re.diagnostics['ASR'] - re.diagnostics['OLR'] plot_sounding([col, re]) # Some properties of the **radiative equilibrium** temperature profile: # # - The surface is warmer than observed. # - The lower troposphere is colder than observed. # - Very cold air is sitting immediately above the warm surface. # - There is no tropopause, no stratosphere. # ____________ # <a id='section7'></a> # # ## 7. 
Radiative-Convective Equilibrium in the 30-layer model # ____________ # # We recognize that the large drop in temperature just above the surface is unphysical. Parcels of air in direct contact with the ground will be warmed by mechansisms other than radiative transfer. # # These warm air parcels will then become buoyant, and will convect upward, mixing their heat content with the environment. # # We **parameterize** the statistical effects of this mixing through a **convective adjustment**. # # At each timestep, our model checks for any locations at which the **lapse rate** exceeds some threshold. Unstable layers are removed through an energy-conserving mixing formula. # # This process is assumed to be fast relative to radiative heating. In the model, it is instantaneous. rce = climlab.RadiativeConvectiveModel(adj_lapse_rate=6.) print rce # This model is exactly like our previous models, except for one additional subprocess called ``convective adjustment``. # # We passed a parameter ``adj_lapse_rate`` (in K / km) that sets the neutrally stable lapse rate -- in this case, 6 K / km. # # This number is chosed to very loosely represent the net effect of **moist convection**. We'll look at this in more detail later. # Set our tuned absorptivity value rce.subprocess['LW'].absorptivity = eps # Run out to equilibrium rce.integrate_years(2.) # Check for energy balance rce.diagnostics['ASR'] - rce.diagnostics['OLR'] # Make a plot to compare observations, Radiative Equilibrium, and Radiative-Convective Equilibrium plot_sounding([col, re, rce]) # Introducing convective adjustment into the model cools the surface quite a bit (compared to Radiative Equilibrium, in green here) -- and warms the lower troposphere. It gives us a MUCH better fit to observations. # # But of course we still have no stratosphere. # ____________ # <a id='section8'></a> # # ## 8. 
Putting stratospheric ozone in the grey-gas model # ____________ # # Our model has no equivalent of the stratosphere, where temperature increases with height. That's because our model has been completely transparent to shortwave radiation up until now. # # We can load the observed ozone climatology from the input files for the CESM model: # + datapath = "http://ramadda.atmos.albany.edu:8080/repository/opendap/latest/Top/Users/Brian+Rose/CESM+runs/" endstr = "/entry.das" ozone = nc.Dataset( datapath + 'som_input/ozone_1.9x2.5_L26_2000clim_c091112.nc' + endstr ) # - print ozone.variables['O3'] lat_O3 = ozone.variables['lat'][:] lon_O3 = ozone.variables['lon'][:] lev_O3 = ozone.variables['lev'][:] # The pressure levels in this dataset are: print lev_O3 # ### Take the global average of the ozone climatology, and plot it as a function of pressure (or height) O3_zon = np.mean( ozone.variables['O3'][:],axis=(0,3) ) print O3_zon.shape O3_global = np.average( O3_zon, axis=1, weights=np.cos(np.deg2rad(lat_O3))) print O3_global ax = plt.figure(figsize=(10,8)).add_subplot(111) ax.plot( O3_global * 1.E6, -np.log(lev_O3/climlab.constants.ps) ) ax.set_xlabel('Ozone (ppm)', fontsize=16) ax.set_ylabel('Pressure (hPa)', fontsize=16 ) ax.set_yticks( -np.log(yticks/1000.) ) ax.set_yticklabels( yticks ) ax.grid() ax.set_title('Global, annual mean ozone concentration', fontsize = 24); # This shows that most of the ozone is indeed in the stratosphere, and peaks near the top of the stratosphere. # # Now create a new column model object **on the same pressure levels as the ozone data**. We are also going set an adjusted lapse rate of 6 K / km. oz_col = climlab.RadiativeConvectiveModel(lev = lev_O3, adj_lapse_rate=6) print oz_col # Now we will do something new: let the column absorb some shortwave radiation. We will assume that the shortwave absorptivity is proportional to the ozone concentration we plotted above. 
# # First we have to deal with a little inconsistency: print lev_O3 print oz_col.lev # The two arrays are in reverse order! # # So we need to flip the ozone data before using it: O3_flipped = np.flipud(O3_global) # Now we need to weight the absorptivity by the pressure (mass) of each layer. # This number is an arbitrary parameter that scales how absorptive we are making the ozone # in our grey gas model ozonefactor = 75 dp = oz_col.Tatm.domain.lev.delta epsSW = np.flipud(O3_global) * dp * ozonefactor # We want to use the field `epsSW` as the absorptivity for our SW radiation model. # # Let's see what the absorptivity is current set to: print oz_col.subprocess['SW'].absorptivity # It defaults to zero. # # Before changing this (putting in the ozone), let's take a look at the shortwave absorption in the column: oz_col.compute_diagnostics() oz_col.diagnostics['SW_absorbed_atm'] # Let's now put in the ozone: oz_col.subprocess['SW'].absorptivity = epsSW print oz_col.subprocess['SW'].absorptivity # Let's check how this changes the SW absorption: oz_col.compute_diagnostics() oz_col.diagnostics['SW_absorbed_atm'] # It is now non-zero, and largest near the top of the column (bottom of array) where the ozone concentration is highest. # Now it's time to run the model out to radiative-convective equilibrium oz_col.integrate_years(1.) print oz_col.diagnostics['ASR'] - oz_col.diagnostics['OLR'] # And let's now see what we got! # Make a plot to compare observations, Radiative Equilibrium, Radiative-Convective Equilibrium, and RCE with ozone! plot_sounding([col, re, rce, oz_col]) # And we finally have something that looks looks like the tropopause, with temperature increasing above at approximately the correct rate. # # There are still plenty of discrepancies between this model solution and the observations, including: # # - Tropopause temperature is too warm, by about 15 degrees. 
# - Surface temperature is too cold # # There are a number of parameters we might adjust if we wanted to improve the fit, including: # # - Longwave absorptivity # - Surface albedo # # Feel free to experiment! (That's what models are for, after all). # ### The take home message # # The dominant effect of stratospheric ozone is to vastly increase the radiative equilibrium temperature in the ozone layer. The temperature needs to be higher so that the longwave emission can balance the shortwave absorption. # # Without ozone to absorb incoming solar radiation, the **temperature does not increase with height**. # # This simple grey-gas model illustrates this principle very clearly. # <div class="alert alert-success"> # [Back to ATM 623 notebook home](../index.ipynb) # </div> # ____________ # ## Credits # # The author of this notebook is [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. # # It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php), offered in Spring 2015. # ____________ # ____________ # ## Version information # ____________ # # # %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py # %load_ext version_information # %version_information numpy, climlab
Lectures/Lecture07 -- Grey radiation modeling with climlab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# Example 3
# =========
#
# The next example demonstrates the multi-parameter capabilities of ``cgn``. We are going to solve a nonlinear least-squares problem that depends on two parameters $x$ and $y$, where $x$ has to satisfy a nonlinear equality constraint, while $y$ is only nonnegativity constrained.
#
# $
# \begin{align}
# \min_{x \in \mathbb{R}^2, y \in \mathbb{R}} \quad & ||F(x, y)||_2^2 + \beta ||R(x - m)||_2^2\\
# \text{s. t.} \quad & x_1 + x_1^3 + x_2 + x_2^2 = 0, \quad y > 0, \\
# \text{where } \quad & F(x) = \left(\begin{matrix}
# x_1 + e^{-x_2} + \sqrt{y} \\
# x_1^2 + 2 x_2 + 1 - \sqrt{y}
# \end{matrix} \right), \\
# & R = \left( \begin{matrix}
# 1 & 2 \\
# 3 & 4
# \end{matrix} \right), \quad m = \left(\begin{matrix}
# 1 \\ 1 \\
# \end{matrix} \right), \quad \beta = 0.1.
# \end{align}
# $
#
# Let us start by implementing the required functions and their derivatives:

# + pycharm={"name": "#%%\n"}
from math import exp, sqrt

import numpy as np

import cgn


def F(x, y):
    """Misfit function F(x, y) of the example problem above.

    BUGFIX: the original implementation computed ``exp(-x[1] + sqrt(y[0]))``,
    which matches neither the stated formula (x_1 + e^{-x_2} + sqrt(y)) nor
    the Jacobian ``DF`` below (whose first row is [1, -e^{-x_1}, 0.5/sqrt(y)]).
    The square root term now sits outside the exponential.
    """
    out = np.array([x[0] + exp(-x[1]) + sqrt(y[0]),
                    x[0] ** 2 + 2 * x[1] + 1 - sqrt(y[0])])
    return out


def DF(x, y):
    """Jacobian of ``F`` with respect to the concatenated parameter (x, y).

    Rows correspond to the two components of ``F``; columns to
    (x_1, x_2, y). Requires y > 0 (division by sqrt(y)).
    """
    jac = np.array([[1., -exp(-x[1]), 0.5 / sqrt(y[0])],
                    [2 * x[0], 2., - 0.5 / sqrt(y[0])]])
    return jac


# + [markdown] pycharm={"name": "#%% md\n"}
# Next, we set up the inequality constraint, which only depends on $x$:

# + pycharm={"name": "#%%\n"}
def g(x):
    """Scalar constraint g(x) = x_1 + x_1^3 + x_2 + x_2^2, returned as a 1-vector."""
    out = x[0] + x[0] ** 3 + x[1] + x[1] ** 2
    return np.array([out])


def Dg(x):
    """Jacobian of ``g``: a (1, 2) array of partial derivatives w.r.t. x."""
    jac = np.array([1 + 3 * x[0] ** 2, 1. + 2 * x[1]]).reshape((1, 2))
    return jac


# + [markdown] pycharm={"name": "#%% md\n"}
# Next, we set up our ``cgn.Parameter`` objects.
For the initial guess, let's just try $x = [0, 0]$ and $y = 1$: # + pycharm={"name": "#%%\n"} x = cgn.Parameter(name="x", start=np.zeros(2)) y = cgn.Parameter(name="y", start=np.ones(1)) # + [markdown] pycharm={"name": "#%% md\n"} # For ``x``, we have to specify the regularization term $\beta ||R(x-m)||_2^2$: # + pycharm={"name": "#%%\n"} x.regop = np.array([[1., 2.], [3., 4.]]) x.mean = np.array([1., 1.]) x.beta = 0.1 # + [markdown] pycharm={"name": "#%% md\n"} # The second parameter ``y`` is not regularized, but it has to satisfy the lower-bound constraint $y > 0$. We implement this strict inequality constraint as an equality constraint # $y \geq \epsilon$ for a small, positive $\epsilon > 0$. # + pycharm={"name": "#%%\n"} eps = 1e-10 y.lb = np.array([eps]) # + [markdown] pycharm={"name": "#%% md\n"} # With this setup, we can finally create our optimization problem and solve it with ``cgn``. # + pycharm={"name": "#%%\n"} inequality_constraint = cgn.NonlinearConstraint(parameters=[x], fun=g, jac=Dg, ctype="ineq") problem = cgn.Problem(parameters=[x, y], fun=F, jac=DF, constraints=[inequality_constraint]) solver = cgn.CGN() solution = solver.solve(problem=problem) # + [markdown] pycharm={"name": "#%% md\n"} # Seems to work without problems. Let us look at the minimizers: # + pycharm={"name": "#%%\n"} x_min = solution.minimizer("x") y_min = solution.minimizer("y") print(f"x_min = {x_min}") print(f"y_min = {y_min}")
examples/example3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# # Cleaning plan
#
# ## For weights just use the weight of the other competitor if possible ✅
#
# ## Locations just impute 'unknown' ✅
#
# ## For height try to see if they have a height in another fight and use that number ✅
#
# ## For winby just impute 'unknown'
#
# ## For age try to find the age in a previous fight and calculate age using the difference in dates

pd.options.display.max_columns = 30
pd.options.display.max_rows = 100

# Raw scraped fights data; one row per fight, with B_* (blue corner) and
# R_* (red corner) columns for each fighter.
df = pd.read_csv('fights')

df.head()

# Exploratory check: print any hometown values that are ints (data-entry noise).
[print(x) for x in df.B_HomeTown.values if type(x)==int]

df.isna().sum()

# All rows that have at least one missing value.
df_nans = df[df.isna().any(axis=1)]
df_nans

# ## Clean heights ✅
# - Look for height record in different fight
# - seems like there are very few data to impute for Height
# - There are a lot of data for imputing in R_Height though but not for B_Height which is strange
# - Use ID to search other fights

gimmie = df[df['R_ID'] == 2179]
# n = gimmie.iloc[0]['R_Height']
# pd.isna(n)
gimmie

# +
# df['B_Height'] = df.groupby(['B_ID'])['B_Height'].ffill()

# +
# Return all fights with that fighter where height isn't NaN
# Use that value to replace NaN height values for that fighter

def find_and_impute(data, impute, identifiers=()):
    """Fill NaNs in column ``impute`` of the global ``df`` in place.

    For each row whose ``impute`` value is NaN, looks up the fighter id in
    ``identifiers[0]`` and fills with the mean of that fighter's recorded
    values. Prints the NaN count before and after.

    NOTE(review): this version hard-codes ``temp.B_Height`` regardless of
    ``impute``, and ``identifiers[0]`` may name the *other* corner's id
    column — confirm the lookup matches the fighter being imputed.
    NOTE(review): ``temp.B_Height.mean()`` is NaN (which is truthy) when no
    heights exist, so NaN can be written back — verify against the data.
    The default for ``identifiers`` is an (immutable) empty tuple; callers
    always pass an explicit list.
    """
    d_copy = data.copy()
    print(d_copy[impute].isna().sum())
    for index, row in d_copy.iterrows():
        if pd.isna(row[impute]):
            ID = row[identifiers[0]]
            temp = df[df[identifiers[0]] == ID]
            temp2 = df[df[identifiers[1]] == ID]
            if temp.B_Height.mean():
                df.at[index, impute] = temp.B_Height.mean()
#             elif temp2.R_Height.mean():
#                 df.at[index, 'B_Height'] = temp2.R_Height.mean()
    print(df[impute].isna().sum())
# -

find_and_impute(df, 'R_Height', ['B_ID', 'R_ID'])

find_and_impute(df, 'B_Height', ['R_ID', 'B_ID'])

# +
# for index, row in df.iterrows():
#     if pd.isna(row.B_Height):
#         ID = row.B_ID
#         temp = df[df['B_ID'] == ID]
#         temp2 = df[df['R_ID'] == ID]
#         r_id.append(ID)
#         print(f'ID: {ID}')
#         print(temp.B_Height, temp2.R_Height)

# +
# print(df.R_Height.isna().sum())
# for index, row in df.iterrows():
#     if pd.isna(row.R_Height):
#         ID = row.B_ID
#         temp = df[df['B_ID'] == ID]
#         temp2 = df[df['R_ID'] == ID]
#         if temp.B_Height.mean():
#             df.at[index, 'R_Height'] = temp.B_Height.mean()
#         elif temp2.R_Height.mean():
#             df.at[index, 'B_Height'] = temp2.R_Height.mean()
# print(df.R_Height.isna().sum())
#         print(f'ID: {ID}')
#         print(temp.B_Height, temp2.R_Height)
# -

# ## impute for age
# - Find the age in a fight and calculate age based on difference in time
#

df1 = df[df.isna().any(axis=1)]
# BUGFIX(review): the original cell read ``df1 = df[df..isna()]`` — a syntax
# error left mid-edit. Given the "impute for age" heading this presumably
# meant rows with a missing age; confirm the intended column.
df1 = df[df.B_Age.isna()]
df1

df[df.B_ID==2179]

type(df[df.R_ID==2179].Date[307])

cop = df.copy()
print(cop.shape)
cop = cop.dropna()
print(cop.shape)

clean = pd.read_csv('clean_fights')

# +
# iterate through data and find fights with missing age values
# Look for their age in a different fight and calculate the time difference of that nan
# Use the difference to calculate their approximate age

def find_and_impute(data, impute, identifiers=()):
    """Exploratory second version (shadows the height imputer above).

    For each NaN in column ``impute``, prints the candidate values found in
    the matching B_/R_ columns of other fights with the same fighter id.
    Does not modify ``data`` — the ``d_copy.at[...]`` assignments are still
    commented out. ``year`` is computed but unused (intended for the
    age-difference calculation described above).
    """
    d_copy = data.copy()
    print(d_copy[impute].isna().sum())
    for index, row in d_copy.iterrows():
        if pd.isna(row[impute]):
#             ID = row[f'{impute.split("_")[0]}_{identifiers[0].split("_")[1]}']
            ID = row[identifiers[0]]
            year = row.Date[-4:]
            ident_0 = d_copy[d_copy[identifiers[0]] == ID]
            ident_1 = d_copy[d_copy[identifiers[1]] == ID]
#             print('YEAR:', year)
            split = impute.split('_')
#             print(split)
            B_impute = f'B_{split[1]}'
            R_impute = f'R_{split[1]}'
            print(ID)
            # BUGFIX: the original tests used ``~frame.empty``; bitwise NOT of
            # a Python bool is -1 or -2, both truthy, so the guard always
            # passed. ``not`` gives the intended "only when matches exist".
            if not ident_0[B_impute].empty:
#                 loc = [x for x in ident_0[B_impute].values if type(x)==str]
#                 d_copy.at[index, impute] = ident_0[B_impute].values
                print(ident_0[B_impute].values)
#                 print(loc)
            if not ident_1[R_impute].empty:
#                 loc = [x for x in ident_1[R_impute].values if type(x)==str]
#                 d_copy.at[index, impute] = ident_1[R_impute].values
                print(ident_1[R_impute].values)
#                 print(loc)
    print(d_copy[impute].isna().sum())
# -

find_and_impute(df, 'B_Age', ['R_ID', 'B_ID'])

name = 'R_Age'
split = name.split('_')
df[f'R_{split[1]}']

R = 'R_HomeTown'
B = 'B_ID'
print(f'{R.split("_")[0]}_{B.split("_")[1]}')

t_df = df[df.R_ID==2179]
type(t_df.R_HomeTown[307])

# ## impute for locations ✅
# - if location is NaN look for fighter ID in different fight and use that hometown
# - for fight location just impute unknown

find_and_impute(df, 'R_Location', ['B_ID', 'R_ID'])

find_and_impute(df, 'B_HomeTown', ['B_ID', 'R_ID'])

find_and_impute(df, 'B_Location', ['Event_ID', 'Event_ID'])

# +
# find_and_impute(df, 'B_Location', ['Event_ID', 'Event_ID'])
# -

# ## Fixing weights ✅
# - fill na values with values of the other corner
# - calc the avg weight in lbs and add weightclass column

# +
# Filling NaN weight values with value of the other corner
# df['B_Weight'] = df['B_Weight'].fillna(df['R_Weight'])
# df['R_Weight'] = df['R_Weight'].fillna(df['B_Weight'])

# +
# df['avg_weight_lbs'] = (df.B_Weight + df.R_Weight)/2 * 2.2

# +
# ranges = [-np.inf, 125.0, 135.0, 145.0, 155.0, 165.0, 170.0, 175.0, 185.0, 195.0, 205.0, 225.0, 265.0, np.inf]
# labels = ['Strawwweight','Flyweight', 'Bantamweight', 'Featherweight', 'Lightweight', 'Super Lightweight', 'Welterweight', 'Super Welterweight',
#           'Middleweight', 'Super Middleweight', 'Light Heavyweight', 'Cruiserweight', 'Heavyweight']
# weight_classes = df.weight_class = pd.cut(df.avg_weight_lbs, ranges, labels=labels)

# +
# df['weight_class'] = pd.Series(weight_classes)
# -

df.head(10)
Data_cleaning.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .cs // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: .NET (C#) // language: C# // name: .net-csharp // --- // # NB03a Retention time and scan time // // [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/CSBiology/BIO-BTE-06-L-7/gh-pages?filepath=NB03a_Retention_time_and_scan_time.ipynb) // // [Download Notebook](https://github.com/CSBiology/BIO-BTE-06-L-7/releases/download/NB03a_NB03b_NB03c/NB03a_Retention_time_and_scan_time.ipynb) // // 1. Retention time and scan time // 1. m/z calculation of the digested peptides // 2. Determination of peptide hydrophobicity // // ## Retention time and scan time // // In general, peptides are separated by one or more steps of liquid chromatography (LC). The retention time (RT) is the time when the measured // peptides were eluting from the column and is therefore influenced by the physicochemical interaction of the particular peptide with the // column material. Scan time is basically synonym to retention time, but more from the point of view of the device. // // The aim of this notebook is to understand that even though peptides are roughly separated by the LC, multiple peptides elute at the same // retention time and are recorded within one MS1 spectrum. Here, we will simulate a MS1 spectrum by random sampling from // our previously generated peptide-mass distribution. Further, we will try to improve our simulation by incorporating information about the peptide // hydrophobicity. It is a only a crude model, but considers the fact that less hydrophobic peptides elute faster from the 13C LC column. // // As always, we start by loading our famous libraries. 
// + dotnet_interactive={"language": "fsharp"} #r "nuget: FSharp.Stats, 0.4.0" #r "nuget: BioFSharp, 2.0.0-beta5" #r "nuget: BioFSharp.IO, 2.0.0-beta5" #r "nuget: Plotly.NET, 2.0.0-beta6" #r "nuget: BIO-BTE-06-L-7_Aux, 0.0.1" #r "nuget: Plotly.NET, 2.0.0-beta8" #r "nuget: Plotly.NET.Interactive, 2.0.0-beta8" open BioFSharp open Plotly.NET open BioFSharp.Elements open BIO_BTE_06_L_7_Aux open FS3_Aux open Retention_time_and_scan_time_Aux open System.IO open FSharp.Stats // - // ## m/z calculation of the digested peptides // // I think you remember the protein digestion process from the privious notebook (see: *NB02b\_Digestion\_and\_mass\_calculation.ipynb* ). This time we also remember the peptide sequence, because we need it later for hydrophobicity calculation. // + dotnet_interactive={"language": "fsharp"} // Code-Block 1 let directory = __SOURCE_DIRECTORY__ let path = Path.Combine[|directory;"downloads/Chlamy_JGI5_5(Cp_Mp).fasta"|] downloadFile path "Chlamy_JGI5_5(Cp_Mp).fasta" "bio-bte-06-l-7" // with /../ we navigate a directory path let peptideAndMasses = path |> IO.FastA.fromFile BioArray.ofAminoAcidString |> Seq.toArray |> Array.mapi (fun i fastAItem -> Digestion.BioArray.digest Digestion.Table.Trypsin i fastAItem.Sequence |> Digestion.BioArray.concernMissCleavages 0 0 ) |> Array.concat |> Array.map (fun peptide -> // calculate mass for each peptide peptide.PepSequence, BioSeq.toMonoisotopicMassWith (BioItem.monoisoMass ModificationInfo.Table.H2O) peptide.PepSequence ) peptideAndMasses |> Array.head // - // Calculate the single and double charged m/z for all peptides and combine both in a single collection. // + dotnet_interactive={"language": "fsharp"} // Code-Block 2 // calculate m/z for each peptide z=1 let singleChargedPeptides = peptideAndMasses // we only consider peptides longer than 6 amino acids |> Array.filter (fun (peptide,ucMass) -> peptide.Length >=7) |> Array.map (fun (peptide,ucMass) -> peptide, Mass.toMZ ucMass 1.) 
// calculate m/z for each peptide z=2 let doubleChargedPeptides = peptideAndMasses // we only consider peptides longer than 6 amino acids |> Array.filter (fun (peptide,ucMass) -> peptide.Length >=7) |> Array.map (fun (peptide,ucMass) -> peptide, Mass.toMZ ucMass 2.) // combine this two let chargedPeptides = Array.concat [singleChargedPeptides;doubleChargedPeptides] chargedPeptides.[1] // - // Now, we can sample our random "MS1" spectrum from this collection of m/z. // + dotnet_interactive={"language": "fsharp"} // Code-Block 3 // initialze a random generator let rnd = new System.Random() // sample n random peptides from all Chlamydomonas reinhardtii peptides let chargedPeptideChar = Array.sampleWithOutReplacement rnd chargedPeptides 100 // we only want the m/z |> Array.map (fun (peptide,mz) -> mz,1.) |> Chart.Column |> Chart.withX_AxisStyle("m/z", MinMax=(0.,3000.)) |> Chart.withY_AxisStyle ("Intensity", MinMax=(0.,1.3)) |> Chart.withSize (900.,400.) chargedPeptideChar // - // This looks quite strange. I think you immediately see that we forgot about our isotopic cluster. A peptide doesn’t produce a single peak, // but a full isotopic cluster. Therefore, we use our convenience function from the previous notebook // (see: *NB02c\_Isotopic\_distribution.ipynb* ). 
// // + dotnet_interactive={"language": "fsharp"} // Code-Block 4 // Predicts an isotopic distribution of the given formula at the given charge, // normalized by the sum of probabilities, using the MIDAs algorithm let generateIsotopicDistribution (charge:int) (f:Formula.Formula) = IsotopicDistribution.MIDA.ofFormula IsotopicDistribution.MIDA.normalizeByMaxProb 0.01 0.005 charge f |> List.toArray generateIsotopicDistribution // + dotnet_interactive={"language": "fsharp"} // Code-Block 5 let peptidesAndMassesChart = // sample n random peptides from all Chlamydomonas reinhardtii peptides Array.sampleWithOutReplacement rnd peptideAndMasses 500 |> Array.map (fun (peptide,mz) -> peptide |> BioSeq.toFormula // peptides are hydrolysed in the mass spectrometer, so we add H2O |> Formula.add Formula.Table.H2O ) |> Array.collect (fun formula -> [ // generate single charged iones generateIsotopicDistribution 1 formula // generate double charged iones generateIsotopicDistribution 2 formula ] |> Array.concat ) |> Chart.Column |> Chart.withX_AxisStyle("m/z", MinMax=(0.,3000.)) |> Chart.withY_AxisStyle ("Intensity", MinMax=(0.,1.3)) |> Chart.withSize (900.,400.) peptidesAndMassesChart // HINT: zoom in on peptides // - // ## Determination of peptide hydrophobicity // // In a MS1 scan, peptides don't appear randomly. They elute according to their hydrophobicity and other physicochemical properties // from the LC. // // To more accurately represent a MS1 spectrum, we determine the hydrophobicity of each peptide. Therefore, we first need a function // that maps from sequence to hydrophobicity. 
// + dotnet_interactive={"language": "fsharp"} // Code-Block 6 open BioFSharp.AminoProperties // first, define a function that maps from amino acid to hydophobicity let getHydrophobicityIndex = BioFSharp.AminoProperties.initGetAminoProperty AminoProperty.HydrophobicityIndex // second, use that function to map from peptide sequence to hydophobicity let toHydrophobicity (peptide:AminoAcids.AminoAcid[]) = peptide |> Array.map AminoAcidSymbols.aminoAcidSymbol |> AminoProperties.ofWindowedBioArray 3 getHydrophobicityIndex |> Array.average toHydrophobicity // + dotnet_interactive={"language": "fsharp"} // Code-Block 7 let peptidesFirst200 = chargedPeptides // now we sort according to hydrophobicity |> Array.sortBy (fun (peptide,mass) -> peptide |> Array.ofList |> toHydrophobicity ) |> Array.take 200 peptidesFirst200 |> Array.head // - // Now, we need to generate the isotopic cluster again and visualize afterwards. // + dotnet_interactive={"language": "fsharp"} // Code-Block 8 let peptidesFirst200Chart = peptidesFirst200 |> Array.map (fun (peptide,mz) -> peptide |> BioSeq.toFormula // peptides are hydrolysed in the mass spectrometer, so we add H2O |> Formula.add Formula.Table.H2O ) |> Array.collect (fun formula -> [ // generate single charged iones generateIsotopicDistribution 1 formula // generate double charged iones generateIsotopicDistribution 2 formula ] |> Array.concat ) // Display |> Chart.Column |> Chart.withX_AxisStyle("m/z", MinMax=(0.,3000.)) |> Chart.withY_AxisStyle ("Intensity", MinMax=(0.,1.3)) |> Chart.withSize (900.,400.) peptidesFirst200Chart // HINT: zoom in on peptides // - // ## Questions // // 1. How does the gradient applied at a reverse phase LC influence the retention time? // 2. Try generating your own MS1 spectrum with peptides of similar hydrophobicity. Take a look at Codeblock 7 and 8 to see how to do that. // 3. 
To better compare retention times between runs with different gradients or instruments, the retention time of those runs must be aligned. // What could be some ways to align the retention time of different runs?
Notebooks/NB03a_Retention_time_and_scan_time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Problem : https://www.kaggle.com/c/titanic # ## 1. Question and Problem Definition # > Knowing from a training set of samples listing passengers who survived or did not survive the Titanic disaster, can our model determine based on a given test dataset not containing the survival information, if these passengers in the test dataset survived or not. # The highlighted domain of our problem: # > # - On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. Translated 32% survival rate. # - One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. # - Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class. # ## 2. Import Libraries # + # data analysis and wrangling import pandas as pd import numpy as np import random as rnd # visualization import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline # machine learning from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier # - # ## 3. Acquire Data train_df = pd.read_csv('./resource/titanic/train.csv') test_df = pd.read_csv('./resource/titanic/test.csv') combine = [train_df, test_df] # ## 4. Analyze by Describing Data # ### Which features are included in the given dataset? 
print(train_df.columns.values) # - These features are described on [this page](https://www.kaggle.com/c/titanic/data) # preview the data train_df.head() train_df.tail() # #### Categorical features(nominal/ ordinal/ ratio/ interval) # - Categorical: Survived, Sex, Embarked # - Ordinal: Pclass # #### Numerical features(discrete/ continuous/ timeseries) # - Discrete: SibSp, Parch # - Continuous: Age, Fare # #### Features with mixed data types # - **Ticket** is a mix of numeric and alphanumeric data types. **Cabin** is alphanumeric. # #### Features which contain errors or typos # - **Name** may contain errors or typos as there are several ways used to describe a name including titles, round brackets, and quotes used for alternative or short names. # #### Features which contain blank, null or empty values # - **Cabin > Age > Embarked** features contain a number of null values in that order for the training dataset. # - **Cabin > Age** are incomplete in case of test dataset. # ### What are the data types for various features? train_df.info() print('_'*40) test_df.info() # - 7 features are integer or floats in train dataset. 6 in case of test dataset. # - 5 features are strings. # ### What is the distribution of numerical feature values across the samples? train_df.describe() # - Total samples are 891 or 40% of the actual number of passengers on board the Titanic(2,224). # - Survived is a categorical feature with 0 or 1 values. # - Around 38% samples survived representative of the actual survival rate at 32%. # - Most passengers (> 75%) did not travel with parents or children. # - Nearly 30% of the passengers had siblings and/or spouse aboard. # - Fares varied significantly with few passengers (<1%) paying as high as $512. # - Few elderly passengers(<1%) within age range 65-80. # ### What is the distribution of categorical features? # + train_df.describe(include=['O']) #string(object) types #include: List of data types to be included while describing dataframe.
Default is None #data types: #'b' boolean #'i' (signed) integer #'u' unsigned integer #'f' floating-point #'c' complex-floating point #'O' (Python) objects #'S', 'a' (byte-)string #'U' Unicode #'V' raw data (void) # - # - Names are unique across the dataset (count=unique=891) # - Sex variable as two possible values with 65% male(top=male, freq=577/count=891) # - Cabin values have several duplicates across samples. Alternatively several passengers shared a cabin. # - Embarked takes three possible values. S port used by most passengers (top=S). # - TIcket feature has high ratio (22%) of duplicate values (unique=681). # ## 5. Assumptions based on data analysis # #### Correlating # We want to know how well does each feature correlate with Survival. train_df.corrwith(train_df.Survived) # #### Completing # 1. We may want to complete **Age** feature as it is definitely correlated to survival. # 2. We may want to complete the **Embarked** feature as it may also correlate with survival or another important feature. # #### Correcting # 1. **Ticket** feature may be dropped from our analysis as it contains high ratio of duplicates (22%) and there may not be a correlation between Ticket and survival. # 2. **Cabin** feature may be dropped as it is highly incomplete or contains many null values both in training and test dataset. # 3. **PassengerId** may be dropped from training set as it does not contribute to survival. # 4. **Name** feature is relatively non-standard, may not contribute directly to survival, so maybe dropped. # #### Creating # 1. We may want to create a new feature called **Family** based on Parch and SibSP to get total count of family members on board. # 2. We may want to engineer the Name feature to extract **Title** as a new feature. # 3. We may want to create new feature for Age bands. This turns a continuous numerical feature into an ordinal cotegorical feature. # 4. We may also want to create a Fare range feature if it helps our analysis. 
# #### Classifying # We may also add to our assumptions based on the problem description noted earlier. # 1. Women (Sex=female) were more likely to have survived. # 2. Children (Age<?) were more likely to have survived. # 3. The upper-class passengers (Pclass=1) were more likely to have survived. # ## 6. Analyze by pivoting features # To confirm some of our observations and assumptions, we can quickly analyze our feature correlations by pivoting features against each other. We can only do so at this stage for features which do not have any empty values. It also makes sense doing so only for features which are categorical (Sex), ordinal (Pclass), or discrete (SibSp, Parch) type. # - **Pclass** : We observe significant correlation (>0.5) among Pclass=1 and Survived (classifying #3). We decide to include this feature in our model. # - **Sex** : We confirm the observation during problem definition that Sex=female had very high survival rate at 74% (classifying #1). # - **SibSp and Parch** : These features have zero correlation for certain values. It may be best to derive a feature or a set of features from these individual features (creating #1). train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False) train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False) train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False) # ## 7. Analyze by visualizing data # ### Correlating numerical features (Age) # Start by understanding correlations between numerical features and Survival. # <br/> # A histogram chart is useful for analyzing continuous numerical variables like **Age** where banding or ranges will help identify useful patterns.
The histogram can indicate distribution of samples using automatically defined bins or equally ranged bands. This helps us answer questions relating to specific bands (Did infants have better survival rate?) g = sns.FacetGrid(train_df, col='Survived') g.map(plt.hist, 'Age', bins=20) # #### Observations # - Infants (Age <= 4) had high survival rate. # - Oldest passengers (Age = 80) survived. # - Lage number of 15-25 year olds did not survived. # - Most passengers are in 15-35 age range. # #### Decisions # This simple analysis confirm our assumptions as decisions for subsequent workflow stages. # - We should consider Age (our assumption classifying #2) in our model training. # - Complete the Age feature for null values (completing #1). # - We should band age groups (creating #3). # ### Correlating numerical and ordinal features (Pclass) # We can combine multiple features for identifying correlations using a single plot. This can be done with numerical and categorical features which have numeric values. # grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived') grid = sns.FacetGrid(train_df, col='Survived', row='Pclass', height=2.2, aspect=1.6) grid.map(plt.hist, 'Age', alpha=.5, bins=20) grid.add_legend(); # #### Observations # - Pclass=3 had most passengers, however most did not survive. Confirms our classifying assumption #2. # - Infant passengers in Pclass=2 and Pclass=3 mostly survived. Further qualifies our classifying assumption #2. # - Most passengers in Pclass=1 survived. Confirms our classifying assumption #3. # - Pclass varies in terms of Age distribution of passengers. # #### Decisions # - Consider Pclass for model training. # ### Correlating categorical features (Sex) grid = sns.FacetGrid(train_df, row='Embarked', height=2.2, aspect=1.6) grid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep') grid.add_legend(); # #### Observations # - Female passengers had much better survival rate than males. Confirms classifying (#1). 
# - Exception in Embarked=C where males had higher survival rate. This could be correlation between Pclass and Embarked and in turn Pclass and Survived, not necessarily direct correlation between Embarked and Survived. # - Males had better survival rate in Pclass=3 when compared with Pclass=2 for C and Qports. Completing (#2)???? # - Ports of embarkation have varying survival rates for Pclass=3 and among male passengers. Correlating (#1). # #### Decisions # - Add Sex feature to model training. # - Complete and add Embarked feature to model training. # ### Correlating categorical and numerical features # We may also want to correlate categorical features (with non-numeric values) and numeric features. We can consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), Fare (Numeric continuous), with Survived (Categorical numeric). grid = sns.FacetGrid(train_df, row='Embarked', col='Survived', height=2.2, aspect=1.6) grid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None) grid.add_legend() # #### Observations # - Higher fare paying passengers had better survival. Confirms or assumption for creating (#4) fare ranges. # - Port of embarkation correlates with survival rates. Confirms correlating (#1) and completing (#2). # #### Decisions # - Consider banding Fare feature. # ## 8. Wrangle Data # We have collected several assumptions and decisions regarding our datasets and solution requirements. So far we did not have to change a single feature or value to arrive at these. Let us now execute our decisions and assumptions for correcting, creating, and completing goals. # ### Correcting by dropping features # This is a good starting goal to execute. By dropping features we are dealing with fewer data points. Speeds up our notebook and eases the analysis. # <br/> # Based on our assumptions and decisions we want to drop the Cabin (correcting #2) and Ticket (correcting #1) features. 
# + print("Before", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape) #drop features train_df = train_df.drop(['Ticket', 'Cabin'], axis=1) test_df = test_df.drop(['Ticket', 'Cabin'], axis=1) combine = [train_df, test_df] "After", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape # - # ### Creating new feature extracting from existing # We want to analyze if Name feature can be engineered to extract titles and test correlation between titles and survival, before dropping Name and PassengerId features. # <br/> # In the following code we extract Title feature using regular expressions. The RegEx pattern (\w+\.) matches the first word which ends with a dot character within Name feature. The expand=False flag returns a DataFrame. # + for dataset in combine: dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False) pd.crosstab(train_df['Title'], train_df['Sex']) # - # #### Observations # - Most titles band Age groups accurately. For example: Master title has Age mean of 5 years. # - Survival among Title Age bands varies slightly. # - Certain titles mostly survived (Mme, Lady, Sir) or did not (Don, Rev, Jonkheer). # #### Decision # - We decide to retain the new Title feature for model training. 
# + for dataset in combine: # classify many titles as "Rare" dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') # replace many titles with a more common name dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() # + # convert the categorical titles to ordinal and fill NA with 0 title_mapping = {'Mr': 1, 'Miss':2, 'Mrs':3, 'Master':4, 'Rare':5} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) train_df.head() # + # drop the Name feature from datasets. # We also do not need the PassengerId feature in the training dataset. train_df = train_df.drop(['Name', 'PassengerId'], axis=1) test_df = test_df.drop(['Name'], axis=1) combine = [train_df, test_df] train_df.shape, test_df.shape #stay the same # - # ### Converting a categorical feature # Now we can convert features which contain strings to numerical values. This is required by most model algorithms. Doing so will also help us in achieving the feature completing goal. # <br/> # Let us start by converting Sex feature to a new feature called Gender where female=1 and male=0. # + for dataset in combine: dataset['Sex'] = dataset['Sex'].map({'female':1, 'male':0}).astype(int) train_df.head() # - # ### Completing a numerical continuous feature # Now we should start estimating and completing features with missing or null values. We will first do this for the Age feature. # <br/> # We can consider three methods to complete a numerical continuous feature. # <br/> # 1. A simple way is to generate random numbers between mean and standard deviation. # **2. More accurate way of guessing missing values is to use other correlated features. 
In our case we note correlation among Age, Gender, and Pclass. Guess Age values using median values for Age across sets of Pclass and Gender feature combinations. So, median Age for Pclass=1 and Gender=0, Pclass=1 and Gender=1, and so on...** # 3. Combine methods 1 and 2. So instead of guessing age values based on median, use random numbers between mean and standard deviation, based on sets of Pclass and Gender combinations. # <br/> # Method 1 and 3 will introduce random noise into our models. The results from multiple executions might vary. We will prefer method 2. grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', height=2.2, aspect=1.6) grid.map(plt.hist, 'Age', alpha=.5, bins=20) grid.add_legend() # Let us start by preparing an empty array to contain guessed Age values based on Pclass x Gender combinations. guess_ages = np.zeros((2,3)) guess_ages # Now we iterate over Sex (0 or 1) and Pclass (1, 2, 3) to calculate guessed values of Age for the six combinations. # + for dataset in combine: for i in range(0, 2): for j in range(0, 3): guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna() # age_mean = guess_df.mean() # age_std = guess_df.std() # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std) age_guess = guess_df.median() # Convert random age float to nearest .5 age guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5 for i in range(0, 2): for j in range(0, 3): dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i,j] dataset['Age'] = dataset['Age'].astype(int) train_df.head() # - # Let us create Age bands and determine correlations with Survived. # + train_df['AgeBand'] = pd.cut(train_df['Age'], 5) # Bin values into discrete intervals. # Use cut when you need to segment and sort data values into bins. 
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values( by='AgeBand', ascending=True) # - # Let us replace Age with ordinals based on these bands. # + for dataset in combine: dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] train_df.head() ###AgeBand 이용해서 replace할수는 없나? # - # We can now remove the AgeBand feature train_df = train_df.drop(['AgeBand'], axis=1) combine = [train_df, test_df] train_df.head() # ### Create new feature combining existing features # We can create a new feature for FamilySize which combines Parch and SibSp. This will enable us to drop Parch and SibSp from our datasets. # + for dataset in combine: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False) # - # We can create another feature called IsAlone # + for dataset in combine: dataset['IsAlone'] = 0 dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1 train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean() # - # Let us drop Parch, SibSp, and FamilySize features in favor of IsAlone. # + train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) combine = [train_df, test_df] train_df.head() # - # We can also create an artifical feature combining Pclass and Age. # + for dataset in combine: dataset['Age*Class'] = dataset.Age * dataset.Pclass train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10) # - # ### Completing a categorical feature # Embarked feature takes S, Q, C values based on port of embarkation. Our training dataset has two missing values. 
We simply fill these with the **most common occurrence**. freq_port = train_df.Embarked.dropna().mode()[0] freq_port # + for dataset in combine: dataset['Embarked'] = dataset['Embarked'].fillna(freq_port) train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False) # - # ### Converting categorical feature to numeric # We can now convert the EmbarkedFill feature by creating a new numeric Port feature. # + for dataset in combine: dataset['Embarked']= dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q' : 2} ).astype(int) train_df.head() # - # ### Quick completing and converting a numeric feature # We can now complete the Fare feature for single missing value in test dataset using mode to get the value that occurs most frequently for this feature. We do this in a single line of code. # # Note that we are not creating an intermediate new feature or doing any further analysis for correlation to guess missing feature as we are replacing only a single value. The completion goal achieves desired requirement for model algorithm to operate on non-null values. # # We may also want to round off the fare to two decimals as it represents currency. test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True) test_df.head() # We can now create FareBand train_df['FareBand'] = pd.qcut(train_df['Fare'], 4) train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True) # Convert the Fare feature to ordinal values based on the FareBand.
# + for dataset in combine: dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] =0 dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int) train_df = train_df.drop(['FareBand'], axis=1) combine = [train_df, test_df] train_df.head(10) # - # And the test dataset test_df.head(10) # ## 9. Model, Predict, and Solve # Our problem is a classification and regression problem. We want to identify relationship between output (Survived or not) with other variables or features (Gender, Age, Port...). We are also perfoming a category of machine learning which is called supervised learning as we are training our model with a given dataset. With these two criteria - Supervised Learning plus Classification and Regression, we can narrow down our choice of models to a few. These include: # - Logistic Regression # - KNN or k-Nearest Neighbors # - Support Vector Machines # - Naive Bayes classifier # - Decision Tree # - Random Forest # - Perceptron # - Artificial neural network # - RVN or Relevance Vector Machine X_train = train_df.drop('Survived', axis=1) Y_train = train_df['Survived'] X_test = test_df.drop('PassengerId', axis=1).copy() X_train.shape, Y_train.shape, X_test.shape # ### 9.1. Logistic Regression # **Logistic Regression** is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. # <br/> # We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. This can be done by calculating the coefficient of the features in the decision function. 
# <br/> # + # Logistic Regression logreg = LogisticRegression() logreg.fit(X_train, Y_train) Y_pred = logreg.predict(X_test) acc_log = round(logreg.score(X_train, Y_train) * 100, 2) acc_log # + coeff_df = pd.DataFrame(train_df.columns.delete(0)) coeff_df.columns = ['Feature'] coeff_df["Correlation"] = pd.Series(logreg.coef_[0]) coeff_df.sort_values(by='Correlation', ascending=False) # - # **Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).** # <br/> # - Sex is highest positive coefficient, implying as the Sex value increases (male:0 to female:1), the probability of Survived=1 increases the most. # - Inversely as Pclass increases, probability of Survived=1 decreases the most. # - This way Age\*Class is as good artificial feature to model as it has second highest negative correlation with Survived. # - So is Title as second highest positive correlation. # ### 9.2. Support Vector Machines # Next we model using Support Vector Machines which are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. Given a set of training samples, each marked as belonging to one or the other of two categories, an SVM training algorithm builds a model that assigns new test samples to one category or the other, making it a non-probabilistic binary linear classifier. Reference [Wikipedia](https://en.wikipedia.org/wiki/Support-vector_machine). # Note that the moddel generates **a confidence score which is higher** than Logistics Regression model. # + # Support Vector Machines svc = SVC() svc.fit(X_train, Y_train) Y_pred = svc.predict(X_test) acc_svc = round(svc.score(X_train, Y_train) * 100, 2) acc_svc # - # ### 9.3. KNN # In pattern recognition, the k-Nearest Neighbors algorithm (or k-NN for short) is a non-parametric method used for classification and regression. 
A sample is classified by a majority vote of its neighbors, with the sample being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. Reference [wikipedia](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm). # KNN confidence score is **better than Logistics Regression** but **worse than SVM**. # + #KNN knn = KNeighborsClassifier(n_neighbors = 3) knn.fit(X_train, Y_train) Y_pred = knn.predict(X_test) acc_knn = round(knn.score(X_train, Y_train) * 100, 2) acc_knn # - # ### 9.4. Naive Bayes Classifiers # In machine learning, naive Bayes classifiers are a family of simple probabilistic classifiers based on applying Bayes' theorem with strong (naive) independence assumptions between the features. Naive Bayes classifiers are highly scalable, requiring a number of parameters linear in the number of variables (features) in a learning problem. Reference [Wikipedia](https://en.wikipedia.org/wiki/Naive_Bayes_classifier). # The model generated confidence score is **the loweset** among the models evaluated so far. # + # Gaussian Naive Bayes gaussian = GaussianNB() gaussian.fit(X_train, Y_train) Y_pred = gaussian.predict(X_test) acc_gaussian = round(gaussian.score(X_train, Y_train) *100, 2) acc_gaussian # - # ### 9.5. Perceptron # The perceptron is an algorithm for supervised learning of binary classifiers (functions that can decide whether an input, represented by a vector of numbers, belongs to some specific class or not). It is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector. The algorithm allows for online learning, in that it processes elements in the training set one at a time. Reference [Wikipedia](https://en.wikipedia.org/wiki/Perceptron). 
# + # Perceptron perceptron = Perceptron() perceptron.fit(X_train, Y_train) Y_pred = perceptron.predict(X_test) acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2) acc_perceptron # - # ### 9.6. Linear SVC linear_svc = LinearSVC() linear_svc.fit(X_train, Y_train) Y_pred = linear_svc.predict(X_test) acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2) acc_linear_svc # ### 9.7. Stochastic Gradient Descent # + # Stochastic Gradient Descent sgd = SGDClassifier() sgd.fit(X_train, Y_train) Y_pred = sgd.predict(X_test) acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2) acc_sgd # - # ### 9.8. Decision Tree # This model uses a decision tree as a predictive model which maps features (tree branches) to conclusions about the target value (tree leaves). Tree models where the target variable can take a finite set of values are called classification trees; in these tree structures, leaves represent class labels and branches represent conjunctions of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically real numbers) are called regression trees. Reference [Wikipedia](https://en.wikipedia.org/wiki/Decision_tree_learning). # The model confidence score is **the highest** among models evaluated so far. # + # Decision Tree decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, Y_train) Y_pred = decision_tree.predict(X_test) acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2) acc_decision_tree # - # ### 9.9. Random Forests # The next model Random Forests is one of the most popular. Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks, that operate by constructing a multitude of decision trees (n_estimators=100) at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. 
Reference [Wikipedia](https://en.wikipedia.org/wiki/Random_forest). # The model confidence score is **the highest** among models evaluated so far. We decide to use this model's output(Y_pred) for creating our competition submission of results. # + # Random Forest random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, Y_train) Y_pred = random_forest.predict(X_test) random_forest.score(X_train, Y_train) acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2) acc_random_forest # - # ## 10. Model Evaluation # We can now rank our evaluation of all the models to choose the best one for our problem. While both Decision Tree and Random Forest score the same, we choose to use Random Forest as they correct for decision trees' habit of overfitting to their training set. # + models = pd.DataFrame({ 'Model' : ['SVM', 'KNN', 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Decent', 'Linear SVC', 'Decision Tree'], 'Score' : [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree] }) models.sort_values(by='Score', ascending=False) # + submission = pd.DataFrame({ 'PassengerId' : test_df['PassengerId'], 'Survived' : Y_pred }) #submission.to_csv('submission.csv', index=False) # - # ## Reference # This notebook is based on the [Kaggle Kernel](https://www.kaggle.com/startupsci/titanic-data-science-solutions/notebook) and will be used for studying only.
kernel_notes/01. Titanic Machine Learning from Disaster .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # >>> Work in Progress (Following are the lecture notes of Prof <NAME>/Head TA-<NAME> - CS229 - Stanford. This is my interpretation of his excellent teaching and I take full responsibility of any misinterpretation/misinformation provided herein.) # ## Lecture Notes # # #### Outline # - Decision Trees # - Ensemble Methods # - Bagging # - Random Forests # - Boosting # ### Decision Trees # - Non-linear model # - A model is called linear if the hypothesis function is of the form $h(x) = \theta^{T}x$ # - Ski example - months vs latitude - when you can ski # - we cannot get a linear classifier or use SVM for this # - with decision trees you will have a very natural way of classifying this # - partition this into individual regions, isolating positive and negative examples # # #### Selecting Regions - Greedy, Top-Down, Recursive Partitioning # - You ask question and partition the space and then iteratively keep asking new question, partitioning the space # - Is latitude > 30 # - Yes # - Is Month < 3 # - Yes # - No # - No # # - We are looking for a split function # - Region $R_{p}$ # - Looking for a split $S_{p}$ # > $S_{p}(j,t) = (\{ X|X_{j} \lt t, X \in R_{p}\}, \{ X|X_{j} \ge t, X \in R_{p}\} ) = (R_{1}, R_{2})$ # - where j is the feature number and t is the threshold # # #### How to choose splits # - isolate space of positives and negatives in this case # - Define L(R): loss on R # - Given C class, define $\hat{p_{i}}$ to be the __porportion of examples__ in R that are of class C # - Define misclassification loss of any region as # > $L_{misclass}(R) = 1 - \max\limits_{C} \hat{p}_{C}$ # - what we are saying here is for any region that we have subdivided, we want to predict the most common class there, which is the maximum of $\hat{p}_{C}$. 
The remaining is the probability of misclassification errors. # - We want to pick a split that maximizes the decrease of loss as much as possible over parent $R_{parent}$ and children regions $R_{1}, R_{2}$ # > $\max\limits_{j,t} L(R_{p}) - (L(R_{1}) + L(R_{2}))$ # # #### Why is misclassification loss the right loss # # # <img src="images/10_misclassificationLoss.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-<NAME>/<NAME>}}$ # # - We might argue that the decision boundary on right scenario is better than left, because in the right we are isolating out more positives # # - Loss of R1 and R2 region = 100 on right scenario # - Loss of R1' and R2' region = 100 on left scenario # - The loss of both parent Rp is also 100 # # - We can see that the misclassification loss is not sensitive enough # - its not sensitive enough or the loss is not informative enough because the parent level loss is same as child level loss # # - Instead we can define __cross entropy loss__ # > $L_{cross}(R) = - \sum\limits_{c}\hat{p}_{c} log_{2}\hat{p}_{c}$ # - we are summing over the classes the proportion of elements in that class times the log of proportion in that class # - if we know everything about one class, we dont need to communicate, as we know everything that it's a 100% chance that it is of one class # - if we have a even split, then we need to communicate lot more information about the class # # - Cross entropy came from information theory where it is used for transmitting bits, where you can transmit bits of information, which is why it came up as log base 2 # # #### Misclassification loss vs Cross-entropy loss # - Let the plot be between $\hat{p}$ - the proportion of positives in the set vs the loss # - the cross-entropy loss is a strictly concave curve # - Let $L(R_{1})$ and $L(R_{2})$ be the child loss plotted on the curve # - Let there be equal number of examples in both $R_{1}$ and $R_{2}$, are equally weighted # - the overall loss between the two is the 
average loss between the two, which is $\frac{L(R_{1}) + L(R_{2})}{2}$ # - the parent node loss is the projected loss on the curve $L(R_{p})$ # - the projection height is the change in loss # # - as we see below, \hat{p} parent is the average of child proportions # # <img src="images/10_crossEntropyLoss.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-Andrew Ng/Raphael Townshend}}$ # # - the cross-entropy diagram # # <img src="images/10_crossEntropyDiagram.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-Andrew Ng/Raphael Townshend}}$ # # # # - the misrepresenstation loss # - if we end up with child node loss on the same side of the curve, there is no change in loss and hence no information gain based on this kind of representation # - this is not strictly concave curve # # <img src="images/10_misrepresentationDiagram.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-<NAME>/Raphael Townshend}}$ # # - the decision splits curves that are successfully used are strictly concave curve # # - Gini curve # > $\sum\limits_{c}\hat{p}_{c}(1-\hat{p}_{c})$ # #### Regression Tree - Extension for decision tree # - So far we used decision tree for classification # - Decision trees can also be used for regression trees # - Example: Amount of snowfall # - Instead of predicting class, you predict mean of the # # - For Region $R_{m}$, the prediction will be # > Predict $\hat{y}_{m} = \frac{\sum\limits_{i \in R_{m}}Y_{i}}{|R_{m}|}$ # - sum all the values within the region and average them # # # <img src="images/10_regressionTrees.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-<NAME>/Raphael Townshend}}$ # # # The loss will be # > $L_{squared} = \frac{\sum\limits_{i \in R_{m}} (y_{i} - \hat{y}_{m})^{2} }{|R_{m}|}$ # #### Categorical Variables # - can ask questions on any form of subset, is location in northern hemisphere? 
# - $location \in \{N\}$ # - if there are q categories, the possible number of splits would be $2^{q}$, which very quickly becomes intractable # # # #### Regularization of DTs # - if you carry on the process of splits, you can split region for each datapoint and that will be case of overfitting # - Decision trees are high variance models # - So we need to regularize the decision tree models # - Heuristics for regularization # - If you have a minimum leaf size, stop # - max depth # - max number of nodes # - min decrease in loss # - Before split, the loss is: $L(R_{p})$ # - After split, the loss is: $L(R_{1}) + L(R_{2})$ # - if after split, the loss is not great enough, we might conclude that it didn't gain us anything # - but there might be some correlation between variables # - pruning # - you grow up your full tree and check which nodes to prune out # - you have a validation set that you use and you evaluate what your misclassification error is on the validation set, for each example for each leaf # #### Runtime # - n train examples # - f features # - d depth of tree # # ##### Test time O(d) # d < log n # # ##### Train time # - Each point is part of O(d) nodes # - Cost of point at each node is O(f) # - for binary features, the cost will be f # - for quantitative features, sort and scan linearly, the cost will be f, as well # - Total cost is O(nfd) # - where data matrix size is nf # - and depth is log n # - so cost is fairly fast training time # #### Downside of DT # - it does not have additive structure # - in the example below we get a very rough estimation of decision boundary # - decision trees have problems where the features are interacting additively with one another # # <img src="images/10_noAdditiveStructure.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-Andrew Ng/Raphael Townshend}}$ # # #### DT - Recap # - Pos # - Easy to explain # - Interpretable # - can deal with categorical variable # - generally fast # # - Neg # - high variance 
problems - generally leads to overfitting # - Not additive # - Low predictive accuracy # # - We can make it lot better with ensembling # ### Ensembling # - take $X_{i}'s$ which are random variables that are independent identically distributed (i.i.d.) # > $Var(X_{i}) = \sigma^{2}$ # > $Var(\bar{X}) = Var\left(\frac{1}{n}\sum\limits_{i}X_{i}\right) = \frac{\sigma^{2}}{n}$ # - which means each independent rv is decreasing the variance of your model # # - If we drop the independence assumption, so now $X_{i}'s$ are only i.d. X's are correlated by $\rho$ # - So the variance of mean will be: # > $Var(\bar{X}) = \rho \sigma^{2} + \frac{1-\rho}{n} \sigma^{2}$ # - if they are fully correlated ($\rho = 1$), it becomes $Var(\bar{X}) = \sigma^{2}$ # - if there is no correlation($\rho = 0$), it becomes $Var(\bar{X}) = \frac{\sigma^{2}}{n} $ # - there would be interest in models with large n so the second term goes down. Also have models that are decorrelated so the first term goes down # #### Ways to ensemble # - different algorithms, not really helpful # - use different training sets, not really helpful # - Bagging - Random Forest # - Boosting - Adaboost, xgboost # ### Bagging # - Bootstrap aggregation # - bootstrapping is a method used in statistics to measure uncertainty # - Say that a true population is P # - Training set $S \sim P$ # - Assume population is the training sample P = S # - Bootstrap samples Z \sim S # - Z is sampled from S. We take a training sample S with cardinality N. 
We sample N times from S with replacement, because we are assuming that S is a population and we are sampling from a population
# - Take model and then train on all these separate bootstrap samples
#
# <br>
#
# #### Bootstrap aggregation
# - we will train separate models separately and then average their outputs
# - Say we have bootstrap samples $Z_{1},...,Z_{M}$
# - We train model $G_{m}$ on $Z_{m}$ and define
# > Aggregate Predictor $G(x) = \frac{\sum\limits_{m=1}^{M}G_{m}(x)}{M}$
# - This process is called bagging
#
# #### Bias-Variance Analysis
# > $Var(\bar{X}) = \rho \sigma^{2} + \frac{1-\rho}{n} \sigma^{2}$
# - Bootstrapping is driving down $\rho$
# - But what about the second term
# - With the increase in bootstrap samples, the M term increases, driving down the second term
# - A nice property about bootstrapping is that increasing the number of bootstrap models does not cause more overfitting than before.
# - More M causes less variance
# - But the bias of the model increases
# - because of the random subsampling from S, it causes model to be less complex as we are drawing less data, and increases the bias

# #### Decision Trees + Bagging
# - DT have high variance, low bias
# - this makes DT an ideal fit for bagging

# ### Random Forest
# - RF is a version of decision trees and bagging
# - the random forest introduces even more randomization into each individual decision tree
# - 1st - Earlier we learnt, bootstrapping drives down $\rho$
# - 2nd - But if we can further decorrelate the random variables, we can drive down the variance even further
# - At each split for RF, we consider only a fraction of your total features
# - 1st - Decreasing $\rho$ in $Var(\bar{X})$
# - 2nd - Say in a classification problem, we have found a very strong predictor that gives very good performance on its own (in ski example - the latitude split), and we use that predictor first at the first split. That causes all your models to be very highly correlated.
So we should try to decorrelate the models # ### Boosting # - In bagging we tried to reduce variance # - Boosting is opposite. In boosting we try to reduce bias # - Is additive # - In bagging, we took average of number of variables # - In boosting, we train one model and then add it into the ensemble and then keep adding in as prediction # - Decision stump - ask one question at a time # - the reason behind this is: we are decreasing bias by restricting the tree depth to be only 1 # - this causes the bias to increase and decrease the variance # - Say we make a split and make some misclassifications. # - we identify those mistakes and increase the weights # - in the next iteration, it works on the modified sets - because of more weights on misclassfied samples, split might pick this weighted decision boundary # # <img src="images/10_boosting.png" width=400 height=400> # $\tiny{\text{YouTube-Stanford-CS229-<NAME>/<NAME>}}$ # # # #### Adaboost # - Determine for classifier $G_{m}$ a weight $\alpha_{m}$ proportional, which is log odds # > $log\left( \frac{1-err_{m}}{err_{m}}\right)$ # - Total classifier # > $G(x) = \sum\limits_{m}\alpha_{m}G_{m}$ # - each $G_{m}$ is trained on re-weighted training set # # - Similar mechanism is used to derive algorithm like XGBoost or gradient boosting machines that allow us to reweight the examples we are getting right or wrong in dynamic fashion and then adding them in additive fashion to your model
cs229_ml/lec10-DecisionTrees-EnsembleMethods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + import xarray as xr import hvplot.pandas # noqa import hvplot.xarray # noqa import cartopy.crs as ccrs from bokeh.sampledata.airport_routes import airports # - # ## Installation # # The plot API also has support for geographic data built on top of Cartopy and GeoViews. Both can be installed using conda with: # # conda install -c pyviz geoviews # # or if the cartopy dependency has been satisfied in some other way, GeoViews may also be installed using pip: # # pip install geoviews # # ## Usage # # Only certain hvPlot types support geographic coordinates, currently including: 'points', 'polygons', 'paths', 'image', 'quadmesh', 'contour', and 'contourf'. As an initial example, consider a dataframe of all US airports (including military bases overseas): airports.head(3) # ### Plotting points # # If we want to overlay our data on geographic maps or reproject it into a geographic plot, we can set ``geo=True``, which declares that the data will be plotted in a geographic coordinate system. The default coordinate system is the ``PlateCarree`` projection, i.e., raw longitudes and latitudes. If the data is in another coordinate system, you will need to [declare an explicit ``crs``](#Declaring-a-CRS) as an argument, in which case `geo=True` is assumed. Once hvPlot knows that your data is in geo coordinates, you can use the ``tiles`` option to overlay a the plot on top of map tiles. airports.hvplot.points('Longitude', 'Latitude', geo=True, color='red', alpha=0.2, xlim=(-180, -30), ylim=(0, 72), tiles='ESRI') # ### Declaring a CRS # # To declare a geographic plot we have to supply a ``cartopy.crs.CRS`` (or coordinate reference system). 
Coordinate reference systems are described in the [GeoViews documentation](http://geoviews.org/user_guide/Projections.html) and the full list of available CRSs is in the [cartopy documentation](https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html). # ### Geopandas # # Since a GeoPandas ``DataFrame`` is just a Pandas DataFrames with additional geographic information, it inherits the ``.hvplot`` method. We can thus easily load shapefiles and plot them on a map: # + import geopandas as gpd cities = gpd.read_file(gpd.datasets.get_path('naturalearth_cities')) cities.hvplot(global_extent=True, frame_height=450, tiles=True) # - # The GeoPandas support allows plotting ``GeoDataFrames`` containing ``'Point'``, ``'Polygon'``, ``'LineString'`` and ``'LineRing'`` geometries, but not ones containing a mixture of different geometry types. Calling ``.hvplot`` will automatically figure out the geometry type to plot, but it also possible to call ``.hvplot.points``, ``.hvplot.polygons``, and ``.hvplot.paths`` explicitly. # # It is possible to declare a specific column to use as color with the ``c`` keyword: # + world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) world.hvplot(geo=True) + world.hvplot(c='continent', geo=True) # - # ### Declaring an output projection # The ``crs=`` argument specifies the *input* projection, i.e. it declares how to interpret the incoming data values. You can independently choose any *output* projection, i.e. how you want to map the data points onto the screen for display, using the ``projection=`` argument. 
After loading the same temperature dataset explored in the [Gridded Data](Gridded_Data.ipynb) section, the data can be displayed on an Orthographic projection: # + air_ds = xr.tutorial.open_dataset('air_temperature').load() air_ds.hvplot.quadmesh( 'lon', 'lat', 'air', projection=ccrs.Orthographic(-90, 30), global_extent=True, frame_height=540, cmap='viridis', coastline=True ) # - # Note that when displaying raster data in a projection other than the one in which the data is stored, it is more accurate to render it as a ``quadmesh`` rather than an ``image``. As you can see above, a QuadMesh will project each original bin or pixel into the correct non-rectangular shape determined by the projection, accurately showing the geographic extent covered by each sample. An Image, on the other hand, will always be rectangularly aligned in the 2D plane, which requires warping and resampling the data in a way that allows efficient display but loses accuracy at the pixel level. Unfortunately, rendering a large QuadMesh using Bokeh can be very slow, but there are two useful alternatives for datasets too large to be practical as native QuadMeshes. # # The first is using the ``datashade`` or ``rasterize`` options to regrid the data before rendering it, i.e., rendering the data on the backend and then sending a more efficient image-based representation to the browser. One thing to note when using these operations is that it may be necessary to project the data **before** rasterizing it, e.g. to address wrapping issues. To do this provide ``project=True``, which will project the data before it is rasterized (this also works for other types and even when not using these operations). Another reason why this is important when rasterizing the data is that if the the CRS of the data does not match the displayed projection, all the data will be projected every time you zoom or pan, which can be very slow. 
Deciding whether to ``project`` is therefore a tradeoff between projecting the raw data ahead of time or accepting the overhead on dynamic zoom and pan actions. # + rasm = xr.tutorial.open_dataset('rasm').load() rasm.hvplot.quadmesh( 'xc', 'yc', crs=ccrs.PlateCarree(), projection=ccrs.PlateCarree(), ylim=(0, 90), cmap='viridis', project=True, geo=True, rasterize=True, coastline=True, frame_width=800, dynamic=False, ) # - # Another option that's still relatively slow for larger data but avoids sending large data into your browser is to plot the data using ``contour`` and ``contourf`` visualizations, generating a line or filled contour with a discrete number of levels: rasm.hvplot.contourf( 'xc', 'yc', crs=ccrs.PlateCarree(), projection=ccrs.PlateCarree(), ylim=(0, 90), frame_width=800, cmap='viridis', levels=10, coastline=True ) # As you can see, hvPlot makes it simple to work with geographic data visually. For more complex plot types and additional details, see the [GeoViews](http://geoviews.org) documentation. # ## Geographic options # # The API provides various geo-specific options: # # - ``coastline`` (default=False): Whether to display a coastline on top of the plot, setting ``coastline='10m'/'50m'/'110m'`` specifies a specific scale # - ``crs`` (default=None): Coordinate reference system of the data specified as Cartopy CRS object, proj.4 string or EPSG code # - ``geo`` (default=False): Whether the plot should be treated as geographic (and assume PlateCarree, i.e. lat/lon coordinates) # - ``global_extent`` (default=False): Whether to expand the plot extent to span the whole globe # - ``project`` (default=False): Whether to project the data before plotting (adds initial overhead but avoids projecting data when plot is dynamically updated) # - ``tiles`` (default=False): Whether to overlay the plot on a tile source. Tiles sources can be selected by name, the default is 'Wikipedia'. 
# Other options are: 'CartoDark', 'CartoEco', 'CartoLight', 'CartoMidnight', 'EsriImagery', 'EsriNatGeo', 'EsriReference', 'EsriTerrain', 'EsriUSATopo', 'OSM', 'StamenLabels', 'StamenTerrain', 'StamenTerrainRetina', 'StamenToner', 'StamenTonerBackground', 'StamenWatercolor'
examples/user_guide/Geographic_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluating Bridge Bidding

import numpy as np


def calculate_expected_information(probabilities=None):
    """Calculate the expected information gain, in bits, of a set of bids.

    Parameters
    ----------
    probabilities : array-like of float, optional
        Probability of each bid being made.  Defaults to the original
        notebook's value of 28 bids each with probability 1/38.

    Returns
    -------
    numpy.float64
        The Shannon entropy -sum(p * log2(p)) of the bid distribution,
        i.e. the expected self-information of a single bid.
    """
    # Avoid a mutable ndarray default argument (shared across calls);
    # fall back lazily to the original default distribution.
    if probabilities is None:
        probabilities = np.array([1 / 38] * 28)
    probabilities = np.asarray(probabilities, dtype=float)
    # The original expression referenced the undefined names `n` and `p`
    # (NameError on every call) and passed a base argument to np.log,
    # whose second positional parameter is actually `out`.  Rewritten as
    # the entropy of the bid distribution in base-2 (bits).
    # NOTE(review): assumes the intended quantity is the base-2 expected
    # self-information — confirm against the bidding-theory source this
    # notebook follows (the original attempted a base-4 logarithm).
    info_gained = -np.sum(probabilities * np.log2(probabilities))
    return info_gained
evaluating_bidding_systems.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Using stochastic gradient descent for regression

# Fixes to the original cell: the title line above was a bare,
# uncommented string (a syntax error in the converted script), and
# `np` was used below without numpy ever being imported.
import numpy as np

from sklearn.datasets import make_regression

# 1,000,000 rows with the default 100 features.
X, y = make_regression(int(1e6))

# print() with a single parenthesized argument behaves identically on
# Python 2 and 3, unlike the original `print "..."` statement form.
print("{:,}".format(X.nbytes))

# Size of X in megabytes, then bytes per element (8 for float64).
X.nbytes / 1e6
X.nbytes / (X.shape[0] * X.shape[1])

from sklearn.linear_model import SGDRegressor

sgd = SGDRegressor()

# Random boolean mask giving a ~75/25 train/test split.
train = np.random.choice([True, False], size=len(y), p=[.75, .25])
sgd.fit(X[train], y[train])

# +
# Predict on the held-out rows and plot the residual distribution.
y_pred = sgd.predict(X[~train])

# %matplotlib inline
import pandas as pd

pd.Series(y[~train] - y_pred).hist(bins=50)
Chapter02/.ipynb_checkpoints/Using stochastic gradient descent for regression-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Using Coco Dataset with Rikai

# End-to-end demo: download a Coco subset, reshape its annotations with
# Spark into Rikai's format on disk, then read the result back through a
# PyTorch DataLoader.

# +
from pyspark.sql.functions import udf, size, col
from pyspark.sql.types import FloatType, StructField, StructType, IntegerType, ArrayType, StringType
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import numpy as np
from rikai.spark.utils import get_default_jar_version

# Resolve the Rikai Spark-extension jar version matching the installed package.
version = get_default_jar_version(use_snapshot=True)

# Local Spark session with the Rikai jar pulled from Maven coordinates.
spark = (
    SparkSession
    .builder
    .appName('rikai-quickstart')
    .config('spark.jars.packages', "ai.eto:rikai_2.12:{}".format(version))
    .master('local[*]')
    .getOrCreate()
)
# -

# # Preparing Coco Dataset
#
# It will download [Fast.ai subset of Coco dataset](https://course.fast.ai/datasets#coco). It might take sometime.

# +
# Download Coco Sample Dataset from Fast.ai datasets
import os
import subprocess

# Skip the download when a previous run already unpacked the archive.
# NOTE(review): piping wget through tar with shell=True assumes both
# tools exist on PATH — confirm for the target environment.
if not os.path.exists("coco_sample"):
    subprocess.check_call("wget https://s3.amazonaws.com/fast-ai-coco/coco_sample.tgz -O - | tar -xz", shell=True)
else:
    print("Coco sample already downloaded...")

# +
# Convert coco dataset into Rikai format
import json
from rikai.spark.functions import image, box2d_from_top_left

with open("coco_sample/annotations/train_sample.json") as fobj:
    coco = json.load(fobj)

# print(coco.keys())
# print(coco["categories"])
# print(coco["annotations"][:10])

# +
categories_df = spark.createDataFrame(coco["categories"])

# Make sure that all bbox coordinates are float
anno_array = [{
    "image_id": a["image_id"],
    "bbox": [float(x) for x in a["bbox"]],
    "category_id": a["category_id"]
} for a in coco["annotations"]]

# box2d_from_top_left builds a Rikai Box2d struct from each raw bbox column.
anno_df = (
    spark
    .createDataFrame(anno_array)
    .withColumn("box2d", box2d_from_top_left("bbox"))
)

# We could use JOIN to replace pycocotools.COCO
# Attach the category name to each annotation, then collect one list of
# annotation structs per image.
annotations_df = (
    anno_df.join(categories_df, anno_df.category_id == categories_df.id)
    .withColumn("anno", F.struct([col("box2d"), col("name"), col("category_id")]))
    .drop("box", "name", "id", "category_id")
    .groupBy(anno_df.image_id)
    .agg(F.collect_list("anno").alias("annotations"))
)
annotations_df.printSchema()
annotations_df.show(5)
# -

# ## Build Coco dataset with image and annotations in Rikai format.

# +
from pyspark.sql.functions import col, lit, concat, udf
from rikai.types.vision import Image
from rikai.types.geometry import Box2d
from rikai.spark.functions import image, box2d
from rikai.spark.types import ImageType, Box2dType

# Wrap each image's relative path into a Rikai image reference column.
images_df = spark \
    .createDataFrame(spark.sparkContext.parallelize(coco["images"])) \
    .withColumn(
        "image",
        image(concat(lit("coco_sample/train_sample/"), col("file_name")))
    )

# NOTE(review): "annotations_df.image_id" is passed to drop() as a plain
# string, and Spark drops columns by name — this argument likely matches
# no column, so the join key may survive in the schema; verify, also
# because "id" is dropped here but queried by name in the next cell.
images_df = images_df.join(annotations_df, images_df.id == annotations_df.image_id) \
    .drop("annotations_df.image_id", "file_name", "id")
images_df.show(5)
images_df.printSchema()

# +
# Inspect Bounding Boxes on an Image
# NOTE(review): this PIL Image import shadows rikai.types.vision.Image
# imported above — only PIL drawing is used from here on.
from PIL import Image, ImageDraw

row = images_df.where("id = 32954").first()
image = row.image.to_pil()
draw = ImageDraw.Draw(image)
# Draw each annotation's box and category name onto the PIL image.
for anno in row.annotations:
    bbox = anno.box2d
    draw.rectangle(bbox.to_numpy().tolist(), outline="green", width=2)
    draw.text([bbox.xmin + 5, bbox.ymin - 10], str(anno.name), fill="red")
image
# -

# Write Spark DataFrame into the rikai format.
(
    images_df
    .repartition(4)  # Control the number of files
    .write
    .format("rikai")
    .mode("overwrite")
    .save("/tmp/rikaicoco/out")
)

# # This dataset can be directly loaded into Pytorch

# +
from rikai.torch.data import DataLoader

data_loader = DataLoader(
    "/tmp/rikaicoco/out",
    columns=["image_id", "image"],
    batch_size=4,
    shuffle=True,
)
# -

batch = next(iter(data_loader))

len(batch)

# # Data is appropriately converted into pytorch.Torch

batch[0]
notebooks/Coco.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demonstrates TF1-style graph construction and sessions from TensorFlow 2
# via the tf.compat.v1 shim: first with eager execution on (where
# Session.run is expected to fail), then with eager execution disabled.

import tensorflow as tf

# ### Building a graph in v1.compat mode

# True by default in TF2: operations execute immediately.
tf.compat.v1.executing_eagerly()

# +
a = tf.constant(5, name = "a")
b = tf.constant(7, name = "b")
c = tf.add(a, b, name = "sum")
# -

# using session and run() function will throw error now as eager execution is enabled by default

# +
# NOTE: this cell is expected to raise while eager mode is still on.
sess = tf.compat.v1.Session()

sess.run(c)
# -

# Eager tensor: displaying it shows the computed value directly.
c

# ### Disabling eager_execution

# +
tf.compat.v1.disable_eager_execution()

# Should now report False.
tf.compat.v1.executing_eagerly()
# -

# Start from an empty default graph before re-adding the ops.
tf.compat.v1.reset_default_graph()

# +
a = tf.constant(5, name = "a")
b = tf.constant(7, name = "b")
c = tf.add(a, b, name = "sum")
# -

# Now a symbolic graph tensor: no value until run inside a session.
c

# +
sess = tf.compat.v1.Session()

sess.run(c)

# +
d = tf.multiply(a, b, name = "product")

sess.run(d)
# -

# Release the session's resources.
sess.close()

# ### Using variables, placeholders, and the feed dictionary

# +
# NOTE(review): tf.Variable's second positional parameter is `trainable`,
# not dtype — passing tf.float32 here is merely truthy; dtype=tf.float32
# was presumably intended.  Confirm before relying on the dtype.
m = tf.Variable([4.0, 5.0, 6.0], tf.float32, name='m')
c = tf.Variable([1.0, 1.0, 1.0], tf.float32, name='c')
# -

m

c

# +
# Placeholder for a length-3 input vector, supplied at run time.
x = tf.compat.v1.placeholder(tf.float32, shape=[3], name='x')

x

# +
# Element-wise y = m*x + c over the three components.
y = m * x + c
# -

# Variables must be explicitly initialized in graph mode.
init = tf.compat.v1.global_variables_initializer()

# !rm -rf ./logs/

with tf.compat.v1.Session() as sess:
    sess.run(init)
    # feed_dict binds the placeholder to a concrete value for this run.
    y_output = sess.run(y, feed_dict={x: [100.0, 100.0, 100.0]})
    print ("Final result: mx + c = ", y_output)
    # Dump the graph so TensorBoard can visualise it from ./logs.
    writer = tf.compat.v1.summary.FileWriter('./logs', sess.graph)
    writer.close()

# #### Using Tensorboard

# %load_ext tensorboard

# %tensorboard --logdir="./logs" --port 6060

# ### NOTE:
#
# - Do not explore the graph within Jupyter
# - go to localhost:6060 in browser and show it again (explore it in the browser here)
# - click on each node in the graph
# - click on the tag drop-down on the left, only the default graph is present
# - click on the trace inputs slider, then once again click on the different nodes. You will see specific paths highlighted

# After this point restart the jupyter to enable eager execution again
notebooks/tensor01/02. V1Sessions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: qa:Python # language: python # name: conda-env-qa-py # --- # # Retriever Model and Pipeline Evaluation on LegalQuAD # This notebook contains the Evaluation of different Retriever and Reader Models on the German LegalQuAD test dataset. # The experiments were conducted as part of the Paper "Collaborative System for Question Answering in German Case Law Documents" published at the Pro-Ve 2022. # A detailed description of the experiments can be found in the paper. # + [markdown] tags=[] # ### Preprequisits: # # - Python >= 3.7.5 # - farm-haystack >= 1.0 # - PyTorch # - Running ElasticSearch DocumentStore (self-hosted or managed) # - GPU support is highly recommended # - # ## Table of Contents: # # * [1. Database setup](#chapter1) # * [2. Data Preprocessing & Database indexing](#chapter2) # * [3. Retriever and Reader model initialisation](#chapter3) # * [4. Evaluation & Results](#chapter4) # * [4.1 Retriever models standalone](#section_4_1) # * [4.1.1 Retriever model on top_k=10](#section_4_1_1) # * [4.1.2 Retriever model with top_k=range(10,110,10)](#section_4_1_2) # * [4.2 QA-Pipeline](#section_4_2) # * [4.2.1 QA-Pipeline with BM25](#section_4_2_1) # * [4.2.2 QA-Pipeline with MFAQ](#section_4_2_2) # * [4.2.3 QA Pipeline with Ensemble Retriever (BM25 + MFAQ)](#section_4_2_3) # ## 1. 
Database setup <a class="anchor" id="chapter1"></a> # + # Import ElasticSearch DocumentStore from haystack.document_stores import ElasticsearchDocumentStore # Define doc and label index for the database doc_index = "eval_docs" label_index = "eval_labels" # Init and connect to ElasticSearch document_store = ElasticsearchDocumentStore(host="HOST", port=443, scheme='https', username="", password="", index=doc_index, label_index=label_index, embedding_field="emb", embedding_dim=768, excluded_meta_data=["emb"], similarity="dot_product") # + [markdown] tags=[] # ## 2. Data Preprocessing & Database indexing <a class="anchor" id="chapter2"></a> # + tags=[] # Import Preprocessor from haystack.nodes import PreProcessor # Init preprocessor # Split documents after 200 words preprocessor = PreProcessor( language='de', split_length=200, split_overlap=0, split_respect_sentence_boundary=False, clean_empty_lines=False, clean_whitespace=False ) # delete indicies to make sure, there are no duplicates document_store.delete_documents(index=doc_index) document_store.delete_documents(index=label_index) # Convert SQuAD dataset into haystack document format document_store.add_eval_data( filename="../data/legal_squad_test.json", doc_index=doc_index, label_index=label_index, preprocessor=preprocessor ) # + [markdown] tags=[] # ## 3. 
Retriever and Reader model initialisation <a class="anchor" id="chapter3"></a> # + tags=[] # Import Retrieval methods from haystack.nodes import TfidfRetriever, ElasticsearchRetriever, DensePassageRetriever, EmbeddingRetriever # Init TF-IDF retriever_tfidf = TfidfRetriever(document_store=document_store) # Init BM25 retriever_bm25 = ElasticsearchRetriever(document_store=document_store) # Init EmbeddingRetriver from huggingface retriever_emb = EmbeddingRetriever(document_store=document_store, embedding_model="clips/mfaq") # Update passage embeddings inside the document store document_store.update_embeddings(retriever_emb, index=doc_index) # + [markdown] tags=[] # ## 4. Evaluation & Results <a class="anchor" id="chapter4"></a> # + tags=[] # Import Pipeline and pre-defined pipelines from haystack import Pipeline from haystack.pipelines import ExtractiveQAPipeline, DocumentSearchPipeline, JoinDocuments # Import Evaluation results and Labels from haystack.schema import EvaluationResult, MultiLabel # set evaluation labels eval_labels = document_store.get_all_labels_aggregated(drop_negative_labels=True, drop_no_answers=False) # - # ### 4.1 Retriever models standalone <a class="anchor" id="section_4_1"></a> # + [markdown] tags=[] # #### 4.1.1 Retriever model on top_k=10 <a class="anchor" id="section_4_1_1"></a> # + tags=[] # Evaluate all retriever models on top_k=10 def get_retrieval_results(top_k=10): """ Method iterates over the defined retrieval methods and evaluates their passage search capabilities. """ # list with the retrieval methods initialized in chapter 3. 
retrieval_methods = [retriever_tfidf, retriever_bm25, retriever_emb] for method in retrieval_methods: # init document search pipeline pipeline_ds = DocumentSearchPipeline(retriever=method) # init evaluation pipeline eval_result_pipeline = pipeline_ds.eval( labels=eval_labels, params={"Retriever": {"top_k": top_k}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RETRIEVER RESULTS: {method.__str__()} ***") print(f'Retriever - Recall (single relevant document): {metrics["Retriever"]["recall_single_hit"]}') print(f'Retriever - Recall (multiple relevant documents): {metrics["Retriever"]["recall_multi_hit"]}') print(f'Retriever - Mean Reciprocal Rank: {metrics["Retriever"]["mrr"]}') print(f'Retriever - Precision: {metrics["Retriever"]["precision"]}') print(f'Retriever - Mean Average Precision: {metrics["Retriever"]["map"]}') print("******************************************************************") # call method get_retrieval_results() # + [markdown] tags=[] # #### 4.1.2 Retriever model with top_k=range(10,110,10) <a class="anchor" id="section_4_1_2"></a> # + tags=[] # Evaluate retriever models in range(10,110,10) def get_retrieval_results_in_range(): """ Method iterates over defined retrieval methods and evaluates their passage search capabilities. """ # list with the retrieval methods initialized in chapter 3. 
retrieval_methods = [retriever_tfidf, retriever_bm25, retriever_emb] for top_k in range (10,110,10): print(f"*** Results on top_k: {top_k} ***") for method in retrieval_methods: # init document search pipeline pipeline_ds = DocumentSearchPipeline(retriever=method) # init evaluation pipeline eval_result_pipeline = pipeline_ds.eval( labels=eval_labels, params={"Retriever": {"top_k": top_k}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RETRIEVER RESULTS: {method.__str__()} ***") print(f'Retriever - Recall (single relevant document): {metrics["Retriever"]["recall_single_hit"]}') print(f'Retriever - Recall (multiple relevant documents): {metrics["Retriever"]["recall_multi_hit"]}') print(f'Retriever - Mean Reciprocal Rank: {metrics["Retriever"]["mrr"]}') print(f'Retriever - Precision: {metrics["Retriever"]["precision"]}') print(f'Retriever - Mean Average Precision: {metrics["Retriever"]["map"]}') print("******************************************************************") # call method get_retrieval_results_in_range() # + [markdown] tags=[] # ### 4.2 QA-Pipeline <a class="anchor" id="section_4_2"></a> # + tags=[] # Import FARMReader from haystack.nodes.reader import FARMReader # Init fine_tuned reader reader_fine_tuned = FARMReader("finetuned_models/GELECTRA-large-LegalQuAD-new", return_no_answer=True) # Init base reader reader_base = FARMReader("deepset/gelectra-base-germanquad", return_no_answer=True) # Init large reader reader_large = FARMReader("deepset/gelectra-large-germanquad", return_no_answer=True) # + [markdown] tags=[] # #### 4.2.1 QA-Pipeline with BM25 <a class="anchor" id="section_4_2_1"></a> # + tags=[] # Fine-tuned reader evaluation pipeline_qa_fine_tuned = ExtractiveQAPipeline(reader=reader_fine_tuned, retriever=retriever_bm25) eval_result_pipeline = pipeline_qa_fine_tuned.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = 
eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader Fine-tuned ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") # + tags=[] # Base reader evaluation pipeline_qa_base = ExtractiveQAPipeline(reader=reader_base, retriever=retriever_bm25) eval_result_pipeline = pipeline_qa_base.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader BASE ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") # + tags=[] # Large reader evaluation pipeline_qa_large = ExtractiveQAPipeline(reader=reader_large, retriever=retriever_bm25) eval_result_pipeline = pipeline_qa_large.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader LARGE ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") # + [markdown] tags=[] # #### 4.2.2 QA-Pipeline with MFAQ <a class="anchor" id="section_4_2_2"></a> # + tags=[] # Fine-tuned reader evaluation pipeline_qa_fine_tuned = ExtractiveQAPipeline(reader=reader_fine_tuned, retriever=retriever_emb) eval_result_pipeline = pipeline_qa_fine_tuned.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader Fine-tuned ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - 
Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") print(f"*** All Metrics ***") print(metrics) print("******************************************************************") # + tags=[] # Base reader evaluation pipeline_qa_base = ExtractiveQAPipeline(reader=reader_base, retriever=retriever_emb) eval_result_pipeline = pipeline_qa_base.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader BASE ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") print(f"*** All Metrics ***") print(metrics) print("******************************************************************") # + jupyter={"source_hidden": true} tags=[] # Large reader evaluation pipeline_qa_large = ExtractiveQAPipeline(reader=reader_large, retriever=retriever_emb) eval_result_pipeline = pipeline_qa_large.eval( labels=eval_labels, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader LARGE ***") print(f'Reader - F1-Score: {metrics["Reader"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader"]["exact_match"]}') print("******************************************************************") print(f"*** All Metrics ***") print(metrics) print("******************************************************************") # + [markdown] tags=[] # #### 4.2.3 QA Pipeline with Ensemble Retriever (BM25 + MFAQ) <a class="anchor" id="section_4_2_3"></a> # + tags=[] # Ensemble Pipeline evaluation from haystack import Pipeline from haystack.pipelines import JoinDocuments pipeline_ensemble = Pipeline() pipeline_ensemble.add_node(component=retriever_bm25, 
name="Retriever_BM25", inputs=["Query"]) pipeline_ensemble.add_node(component=retriever_emb, name="Retriever_EMB", inputs=["Query"]) pipeline_ensemble.add_node(component=JoinDocuments(join_mode="concatenate"), name="JoinResults", inputs=["Retriever_BM25", "Retriever_EMB"]) pipeline_ensemble.add_node(component=reader_fine_tuned, name="Reader_Fine_tuned", inputs=["JoinResults"]) eval_result_pipeline = pipeline_ensemble.eval( labels=eval_labels, params={"Retriever_BM25": {"top_k": 10}, "Retriever_EMB": {"top_k": 10}, "Reader_Fine_tuned": {"top_k": 5}} ) # calculate and print metrics metrics = eval_result_pipeline.calculate_metrics() print(f"*** RESULTS: Reader LARGE ***") print(f'Reader - F1-Score: {metrics["Reader_Fine_tuned"]["f1"]}') print(f'Reader - Exact Match: {metrics["Reader_Fine_tuned"]["exact_match"]}') print("******************************************************************") print(f"*** All Metrics ***") print(metrics) print("******************************************************************")
src/Retrieval_QA_experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Lunar eclipse code

# +
# Setup: load ephemerides and configure the observer location.
import csv
from skyfield.api import load, Topos, wgs84, N, S, W, E
from skyfield.positionlib import ICRF
from skyfield import almanac

# CONSTANTS -- CHANGE THESE
# N, S, W, E are skyfield constants that apply the proper sign to lat/lon.
# Default coordinates are for NYC.
LOC_NAME = 'nyc'  # str constant for file naming
MY_LAT = 40.7128 * N
MY_LON = 74.0060 * W
MY_ELEV_M = 10  # in meters

# We need the positions of celestial bodies, so we fetch ephemerides from JPL.
# https://ssd.jpl.nasa.gov/?planet_eph_export
# DE440: created June 2020; compared to DE430, about 7 years of new data have
#   been added. Referred to the International Celestial Reference Frame v3.0.
#   Covers JED 2287184.5 (1549 DEC 31) to JED 2688976.5 (2650 JAN 25).
# DE441: created June 2020; compared to DE431, about 7 years of new data have
#   been added. Referred to the International Celestial Reference Frame v3.0.
#   Covers JED -3100015.5 (-13200 AUG 15) to JED 8000016.5 (17191 MAR 15).
# DE440 and DE441 are documented in https://doi.org/10.3847/1538-3881/abd414
# https://rhodesmill.org/skyfield/planets.html#popular-ephemerides
# de422.bsp is only 17mb and goes from 1900-2050 (issued 2008-02) 150yr
# de430t.bsp is 128mb covering 1550-2650 (issued 2010-02) 1100yr
# de440t.bsp is an updated version of de430t w/ 7 years more data
eph = load('de440t.bsp')
ts = load.timescale()

# Switch from the Julian to the Gregorian calendar at the historical cutoff
# date, so pre-1582 dates are rendered the way astronomers expect.
from skyfield.api import GREGORIAN_START
ts.julian_calendar_cutoff = GREGORIAN_START

# +
# %time
# Find lunar eclipses: https://rhodesmill.org/skyfield/almanac.html#lunar-eclipses
from skyfield import eclipselib

earth = eph['earth']
sun = eph['sun']
moon = eph['moon']

# Observer position on Earth.
place = earth + wgs84.latlon(MY_LAT, MY_LON, elevation_m=MY_ELEV_M)

t0 = ts.ut1(1550, 1, 1)
t1 = ts.ut1(2640, 1, 1)
t, y, details = eclipselib.lunar_eclipses(t0, t1, eph)
# eclipselib.lunar_eclipses returns:
# * a Time giving the dates of each eclipse,
# * an integer array of codes identifying how complete each eclipse is,
# * a dictionary of further supplementary details about each eclipse.
# -

heading = ('visibility', 'altitude', 'date', 'TDB', 'eclipse_type')
lunar_data = []

# FIX: the csv writers were created over open() handles that were never
# closed; use context managers so every file is flushed and closed.
with open(LOC_NAME + "_lunar_complete.csv", 'w') as f:
    writer = csv.writer(f)
    writer.writerow(heading)
    for ti, yi in zip(t, y):
        # Altitude of the Moon at eclipse time decides local visibility.
        alt, az, distance = place.at(ti).observe(moon).apparent().altaz()
        if alt.degrees > 0:
            vis = 'visible'
        else:
            vis = "not_visible"
        row = (vis,
               alt.degrees,
               ti.utc_strftime('%Y-%m-%d'),
               ti.tdb,
               eclipselib.LUNAR_ECLIPSES[yi])
        lunar_data.append(row)
        writer.writerow(row)

# +
# Also make a version that only has 'visible' non-penumbral eclipses.
# This will be our actual data set.
# FIX: reuse the rows computed above instead of re-observing every eclipse —
# the output is identical and the expensive ephemeris work is done once.
with open(LOC_NAME + "_lunar_observed.csv", 'w') as f:
    writer = csv.writer(f)
    writer.writerow(heading)
    for row in lunar_data:
        vis, _alt, _date, _tdb, etype = row
        if etype != 'Penumbral' and vis == 'visible':
            writer.writerow(row)
# -

# %time
# Full-moon dataset (moon phase code 2 == full moon).
with open(LOC_NAME + "_full_moons.csv", 'w') as f:
    writer = csv.writer(f)
    writer.writerow(('full_moon_date', 'moontype'))
    t_ph, y_ph = almanac.find_discrete(t0, t1, almanac.moon_phases(eph))
    for ti_ph, yi_ph in zip(t_ph, y_ph):
        if yi_ph != 2:
            continue  # full moons only
        row = (ti_ph.utc_strftime('%Y-%m-%d'), almanac.MOON_PHASES[yi_ph])
        writer.writerow(row)

y_ph[:5]

import pandas as pd

df = pd.DataFrame.from_records(lunar_data, columns=heading)
df

# Spot check the balance of visible vs not visible eclipses by type.
df.groupby(['visibility', 'eclipse_type']).count()['date']
lunar_data_generation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Dask [shared installation]
#     language: python
#     name: dask
# ---

# Monthly precipitation climatology near Cape Town (34S, 18E) from CMIP5
# historical (1950-2005) and RCP8.5 (2045-2100) runs, compared with NOAA 20CR.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import cartopy.crs as ccrs
import glob
import os
import scipy.stats
from matplotlib import cm
import seaborn as sns
import dask
import matplotlib.colors as mcolors

dask.config.set(**{'array.slicing.split_large_chunks': False})

models = [x.split('/')[-1] for x in glob.glob("/terra/data/cmip5/global/rcp85/*")]

# Average month length in days (28.25 for February accounts for leap years);
# used below to convert pr from kg m-2 s-1 to mm per month.
DAYS_IN_MONTH = [31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

dic_rcp85 = {}
dic_hist = {}
for model in models:
    try:
        rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/" + str(model) + "/r1i1p1/mon/native/pr_*"))
        rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
        rcp85 = rcp85.sel(time=slice('2045', '2100'))
        rcp85 = rcp85.groupby(rcp85.time.dt.month).mean()

        hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/" + str(model) + "/r1i1p1/mon/native/pr_*"))
        hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
        hist = hist.sel(time=slice('1950', '2005'))
        hist = hist.groupby(hist.time.dt.month).mean()

        dic_hist[model] = hist
        dic_rcp85[model] = rcp85
    # FIX: was a bare `except:` which also swallows KeyboardInterrupt/SystemExit.
    # Models with missing/broken files are reported and skipped.
    except Exception:
        print(model)

# +
model = 'BNU-ESM'  # no historical monthly data; build it from daily output
rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/" + str(model) + "/r1i1p1/mon/native/pr_*"))
rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
rcp85 = rcp85.sel(time=slice('2045', '2100'))
rcp85 = rcp85.groupby(rcp85.time.dt.month).mean()

hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/" + str(model) + "/r1i1p1/day/native/pr_*"))
# -

# Resample daily pr to monthly means so BNU-ESM matches the other models.
hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr.resample(time='M').mean()
hist = hist.sel(time=slice('1950', '2005'))
hist = hist.groupby(hist.time.dt.month).mean()
dic_hist[model] = hist
dic_rcp85[model] = rcp85

"EC-EARTH and CESM1-WACCM - no rcp85 available"

# +
model = 'MPI-ESM-LR'  # a problem with the later-than-2100 data: use first file only
rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/" + str(model) + "/r1i1p1/mon/native/pr_*"))[0]
rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
rcp85 = rcp85.sel(time=slice('2045', '2100'))
rcp85 = rcp85.groupby(rcp85.time.dt.month).mean()

hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/" + str(model) + "/r1i1p1/mon/native/pr_*"))
hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
hist = hist.sel(time=slice('1950', '2005'))
hist = hist.groupby(hist.time.dt.month).mean()
dic_hist[model] = hist
dic_rcp85[model] = rcp85

# +
model = 'CNRM-CM5'  # a problem with the later-than-2100 data: use first two files
rcp85_files = sorted(glob.glob("/terra/data/cmip5/global/rcp85/" + str(model) + "/r1i1p1/mon/native/pr_*"))[:2]
rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
rcp85 = rcp85.sel(time=slice('2045', '2100'))
rcp85 = rcp85.groupby(rcp85.time.dt.month).mean()

hist_files = sorted(glob.glob("/terra/data/cmip5/global/historical/" + str(model) + "/r1i1p1/mon/native/pr_*"))
hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').pr
hist = hist.sel(time=slice('1950', '2005'))
hist = hist.groupby(hist.time.dt.month).mean()
dic_hist[model] = hist
dic_rcp85[model] = rcp85
# -

# NOAA 20CR reanalysis reference climatology for the same point and period.
x = xr.open_mfdataset('/home/pmarsh/NOAA_2deg/prate.mon.mean.nc', decode_cf=True).sel(lat=-34, method='nearest').sel(lon=18, method='nearest').prate
x = x.sel(time=slice('1950', '2005'))
NOAA = x.groupby(x.time.dt.month).mean()

# Keep only models for which both periods were loaded successfully.
models = list(set(list(dic_hist.keys())) & set(dic_rcp85.keys()))
len(models)

# +
colors = []
for i in mcolors.CSS4_COLORS:
    colors.append(i)
colors = colors[::3][:len(models)]
# -

# FIX: these were plain aliases (`store_hist = dic_hist`), so the in-place
# unit conversion below also mutated the "stored" dicts. Shallow-copy the
# dicts instead. NOTE(review): the DataArray values are still shared; if a
# pristine copy is needed, deep-copy each array — confirm intended use.
store_hist = dict(dic_hist)
store_rcp85 = dict(dic_rcp85)

# Convert kg m-2 s-1 to cumulative mm per month (86400 s per day).
for model in models:
    for i, n in zip(range(12), DAYS_IN_MONTH):
        dic_hist[model] = dic_hist[model].load()
        dic_hist[model][i] = dic_hist[model][i] * n * 86400
        dic_rcp85[model] = dic_rcp85[model].load()
        dic_rcp85[model][i] = dic_rcp85[model][i] * n * 86400

for i, n in zip(range(12), DAYS_IN_MONTH):
    NOAA = NOAA.load()
    NOAA[i] = NOAA[i] * n * 86400

# +
ax = plt.figure(figsize=(9, 9))
for model, col in zip(models, colors):
    if model == 'BNU-ESM':
        # BNU-ESM historical was rebuilt from daily data; excluded from this plot.
        continue
    dic_hist[model].plot(marker=".", color='grey', alpha=0.6, lw=2)
NOAA.plot(marker=".", color='blue', label='NOAA 20CR', alpha=1, lw=3)
# Re-plot one model with a label so the legend gets a single CMIP5 entry.
dic_hist[model].plot(marker=".", color='grey', label='CMIP5 Models', alpha=0.6, lw=2)
plt.title('Historical (1950-2005)')
plt.xlabel('Month')
plt.xticks([2, 4, 6, 8, 10, 12], ['F', 'A', 'J', 'A', 'O', 'D'])
plt.ylabel('Monthly Cumulative Rainfall (mm)')
#plt.ylim(0,120)
plt.grid(True)
plt.legend(fontsize='small')
plt.margins(0)
plt.savefig('Boxplots/Historical_1950_2005.png', dpi=100)
plt.savefig('Boxplots/Historical_1950_2005.pdf')
plt.show()

# +
ax = plt.figure(figsize=(9, 9))
for model, col in zip(models, colors):
    dic_rcp85[model].plot(marker=".", color='grey', alpha=0.6, lw=2)
NOAA.plot(marker=".", color='blue', label='NOAA 20CR', alpha=1, lw=3)
dic_rcp85[model].plot(marker=".", color='grey', label='CMIP5 Models', alpha=0.6, lw=2)
plt.title('RCP85 (2045-2100)')
plt.xlabel('Month')
plt.xticks([2, 4, 6, 8, 10, 12], ['F', 'A', 'J', 'A', 'O', 'D'])
plt.ylabel('Monthly Cumulative Rainfall (mm)')
plt.ylim(0, 120)
plt.grid(True)
plt.legend(fontsize='small')
plt.margins(0)
plt.savefig('Boxplots/RCP85_2045_2100.png', dpi=100)
plt.savefig('Boxplots/RCP85_2045_2100.pdf')
plt.show()

# +
# Per-model anomaly (future minus historical) by month.
df = pd.DataFrame(['January', 'February', 'March', 'April', 'May',
                   'June', 'July', 'August', 'September', 'October',
                   'November', 'December'], columns=['month'])
for model in models:
    df = df.join(pd.DataFrame({str(model): dic_rcp85[model].values - dic_hist[model].values}))
df = df.set_index('month')
# -

df

# +
fig = plt.figure(figsize=(20, 9))
ax = sns.boxplot(data=df.T, color="skyblue")
n = plt.setp(ax.get_xticklabels(), rotation=45)
for patch in ax.artists:
    r, g, b, a = patch.get_facecolor()
    patch.set_facecolor((r, g, b, .6))
plt.xlabel('Month')
plt.ylabel('Monthly Cumulative Rainfall (mm)')
plt.savefig('Boxplots/RCP85_anom.png', dpi=100)
plt.savefig('Boxplots/RCP85_anom.pdf')
plt.show()
projections/pr/PR_Change_boxplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] lang="es"
# <img src="../images/demos/FIUM.png" width="350px" class="pull-right" style="display: inline-block">
#
# **ViA / Grado IngInf**<br>
# curso 2018-19<br>
# *[<NAME>](http://dis.um.es/profesores/alberto)*
#
# ---
# -

# # Machine Learning

# + [markdown] heading_collapsed=true
# ## scikit-learn

# + [markdown] hidden=true
# Some simple algorithms could be coded from scratch if we had a bit more
# time. Here we prefer to practice with the excellent
# [scikit-learn](http://scikit-learn.org/stable/) library.
#
# It is very easy to use. For example, to train a decision tree on the classic
# [IRIS](https://en.wikipedia.org/wiki/Iris_flower_data_set) flower
# classification problem we do the following:

# + hidden=true
from sklearn import datasets

dataset = datasets.load_iris()

# + hidden=true
# dataset.keys()

# + hidden=true
# print(dataset['DESCR'])

# + [markdown] hidden=true
# We train a
# [decision tree](https://en.wikipedia.org/wiki/Decision_tree_learning) on a
# portion of the examples, keeping the rest aside to assess its quality.

# + hidden=true
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split

# Random train/test partition of the feature matrix and the targets.
train_data, test_data, train_labels, test_labels = train_test_split(
    dataset.data, dataset.target)

# `fit` returns the estimator itself, so fitting can be chained.
model = DecisionTreeClassifier().fit(train_data, train_labels)
print(model)

# + [markdown] hidden=true
# We can now classify new cases:

# + hidden=true
model.predict([[6, 3, 3, 1.5]])

# + [markdown] hidden=true
# An object with that attribute vector is assigned to class 1, which
# corresponds to the flower *Iris- Versicolour*.
# + [markdown] hidden=true
# Finally, we evaluate the quality of the model obtained, using the test examples.

# + hidden=true
from sklearn import metrics

expected = test_labels
predicted = model.predict(test_data)

print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))

# + [markdown] hidden=true
# The result depends on the random partition of the examples, but normally
# almost all of them are classified correctly. It is actually a very easy
# classification problem.

# + [markdown] heading_collapsed=true
# ## MNIST dataset

# + [markdown] hidden=true
# Our goal is to build a system that recognizes handwritten digits in images
# taken with a camera. For that we take advantage of the well-known MNIST
# database:
#
# http://yann.lecun.com/exdb/mnist/
#
# *machine learning hello world*

# + hidden=true
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as la

# + hidden=true
mnist = np.load("../data/mnist.npz")
list(mnist.keys())

# + hidden=true
# xl/yl: training images and one-hot labels; xt/yt: test counterpart.
# cl/ct: integer class labels recovered from the one-hot rows.
xl,yl,xt,yt = [mnist[d] for d in ['xl', 'yl', 'xt', 'yt']]
cl = np.argmax(yl,axis=1)
ct = np.argmax(yt,axis=1)
print(xl.shape, yl.shape, cl.shape)
print(xt.shape, yt.shape, ct.shape)

# + hidden=true
def shdig(v):
    """Show a flat 784-vector as an inverted 28x28 grayscale digit image."""
    x = np.reshape(v,[28,28])
    plt.imshow(1-x, 'gray', vmin=0, vmax=1, interpolation="nearest");

# + hidden=true
shdig(xl[5])

# + hidden=true
def muestrario(imgs,n=10):
    """Build a mosaic of the given images in rows of n, zero-padding the last row."""
    N = len(imgs)
    c = N // n
    r = N % n
    L = imgs + [np.zeros_like(imgs[0]) for k in range(n-r)]
    return np.vstack([ np.hstack([ x for x in L[n*k : n*(k+1)]]) for k in range(c if n*c==N else c+1)])

# + hidden=true
plt.figure(figsize=(8,8))
plt.imshow(-muestrario([x.reshape(28,28) for x in xl[:100]]),'gray');
plt.axis('off');

# + hidden=true
shdig(xl[68])
print(yl[68])
print(cl[68])

# + [markdown] heading_collapsed=true
# ## Dimensionality reduction

# + [markdown] hidden=true
# The dimension of the feature vectors is relatively large (28x28=784).
# Through
# [principal component analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis)
# that dimension can be reduced without too much loss of information.

# + hidden=true
from sklearn import decomposition

# comprime/descomprime = "compress"/"decompress": project to and from PCA space.
pca = decomposition.PCA(n_components=20)
pca.fit(xl)
comprime = pca.transform
descomprime = pca.inverse_transform
tr = comprime(xl)

# + [markdown] heading_collapsed=true hidden=true
# ### 2D projection

# + hidden=true
plt.figure(figsize=(6,6))
plt.plot(*tr[cl!=1][:,[0,1]].T,'.',markerSize=1,alpha=0.1,color='gray');
plt.plot(*tr[cl==1][:,[0,1]].T,'.',markerSize=1,alpha=0.2,color='blue');

# + hidden=true
plt.figure(figsize=(6,6))
plt.plot(*tr[(cl!=3) & (cl!=8)][:,[0,1]].T,'.',markerSize=1,alpha=0.1,color='gray');
plt.plot(*tr[cl==3][:,[0,1]].T,'.',markerSize=1,alpha=0.2,color='blue');
plt.plot(*tr[cl==8][:,[0,1]].T,'.',markerSize=1,alpha=0.2,color='red');

# + [markdown] heading_collapsed=true hidden=true
# ### Reconstruction quality

# + hidden=true
# Original digit vs its reconstruction from only 20 principal components.
k = 2
plt.figure(figsize=(10,5))
plt.subplot(121)
shdig(xl[k])
plt.subplot(122)
shdig(descomprime(comprime([xl[k]]))[0])

# + [markdown] heading_collapsed=true hidden=true
# ### Modes of variation

# + hidden=true
# treses = "threes": all training images of the digit 3.
treses = xl[cl==3]
print(treses.shape)
shdig(treses[0])

# + hidden=true
plt.figure(figsize=(8,8))
plt.imshow(-np.bmat([[ x.reshape(28,28) for x in treses[10*k:10*(k+1)] ] for k in range(10)]),'gray');
plt.axis('off');

# + hidden=true
M = np.mean(treses,axis=0)
shdig(M)

# + hidden=true
# eigh returns eigenvalues in ascending order; flip so the strongest
# modes of variation come first.
C = np.cov(treses.T)
l,V = np.linalg.eigh(C)
V = np.flipud(V.T)

# + hidden=true
plt.figure(figsize=(12,4))
plt.imshow(-np.bmat([[ (V[k]).reshape(28,28) for k in range(10)]]),'gray');
plt.axis('off');

# + hidden=true
shdig(M + 3*V[0])

# + hidden=true
# Sweep the first mode of variation around the class mean.
r = np.linspace(-7,7,11)
plt.imshow(np.bmat([[ (M + a*V[0]).reshape(28,28) for a in r]]),'gray');

# + hidden=true
plt.figure(figsize=(12,4))
plt.imshow(1-np.bmat([[ (M + a*V[0]).reshape(28,28) for a in r]]),'gray',vmin=0,vmax=1);
plt.axis('off');

# + hidden=true
plt.figure(figsize=(12,4))
plt.imshow(1-np.bmat([[ (M + a*V[1]).reshape(28,28) for a in r]]),'gray',vmin=0,vmax=1);
plt.axis('off');

# + hidden=true
plt.figure(figsize=(8,8))
plt.imshow(1-np.bmat([[ (M + a*V[0] + b*V[1]).reshape(28,28) for a in r] for b in r]),'gray',vmin=0,vmax=1);
plt.axis('off');

# + [markdown] heading_collapsed=true
# ## Gaussian classifier

# + [markdown] hidden=true
# We use scikit-learn to build a classifier based on Gaussian classes and
# dimensionality reduction through principal components (PCA).

# + hidden=true
from sklearn import random_projection, decomposition, naive_bayes, discriminant_analysis
from sklearn.metrics import confusion_matrix

def acc(maq,x,y):
    """Accuracy (%) of machine `maq` on samples x with true labels y."""
    return 100*(y == maq.predict(x)).sum() / len(y)

# + hidden=true
#transformer = random_projection.GaussianRandomProjection(n_components=60).fit(xl)
transformer = decomposition.PCA(n_components=40).fit(xl)

# + hidden=true
xrl = transformer.transform(xl)
xrt = transformer.transform(xt)

# + [markdown] hidden=true
# A "naive Bayes" classifier makes more than 12% errors, while the full
# Gaussian achieves less than 4%:

# + hidden=true
gnb = naive_bayes.GaussianNB()

# + hidden=true
maq = gnb.fit(xrl, cl)

# + hidden=true
acc(maq,xrt,ct)

# + hidden=true
maq = discriminant_analysis.QuadraticDiscriminantAnalysis(store_covariance=True).fit(xrl,cl)

# + hidden=true
acc(maq,xrt,ct)

# + hidden=true
confusion_matrix(ct, maq.predict(xrt))

# + [markdown] hidden=true
# We can classify any image in the appropriate 28x28 format:

# + hidden=true
dig = xt[1234]
shdig(dig)
maq.predict(transformer.transform(dig.reshape(1,-1)))

# + [markdown] hidden=true
# (The `reshape` is needed because the machine classifies sets of feature
# vectors given as rows of a matrix.)
# + [markdown] heading_collapsed=true
# ## Real image

# + [markdown] hidden=true
# For the classifiers to work well on real images it is necessary to
# [normalize them](http://yann.lecun.com/exdb/mnist/) so that they have the
# same size and position as the training examples.

# + hidden=true
import cv2 as cv

digits = cv.cvtColor(cv.imread('../images/mydigits.png'),cv.COLOR_BGR2RGB);
plt.imshow(digits);

# + hidden=true
# Binarize with Otsu thresholding to separate ink from background.
ret, gt = cv.threshold(cv.cvtColor(digits,cv.COLOR_RGB2GRAY),189,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
plt.imshow(gt,'gray');

# + hidden=true
def center(p):
    """Return the (x, y) intensity centroid of image p."""
    r,c = p.shape
    rs = np.outer(range(r),np.ones(c))
    cs = np.outer(np.ones(r),range(c))
    s = np.sum(p)
    my = np.sum(p*rs) / s
    mx = np.sum(p*cs) / s
    return mx,my

def boundingBox(c):
    """Axis-aligned bounding box ((x1, y1), (x2, y2)) of a contour's points."""
    (x1, y1), (x2, y2) = c.min(0), c.max(0)
    return (x1, y1), (x2, y2)

def adaptsize(x):
    """Normalize a cropped digit MNIST-style: pad to a square, resize to
    20x20, then shift into a 28x28 frame so the centroid lands at the
    center (the 9.5/4 offsets place the 20x20 patch centered in 28x28)."""
    h,w = x.shape
    s = max(h,w)
    h2 = (s-h)//2
    w2 = (s-w)//2
    y = x
    if w2>0:
        z1 = np.zeros([s,w2])
        z2 = np.zeros([s,s-w-w2])
        y = np.hstack([z1,x,z2])
    if h2>0:
        z1 = np.zeros([h2,s])
        z2 = np.zeros([s-h-h2,s])
        y = np.vstack([z1,x,z2])
    y = cv.resize(y,(20,20))/255
    mx,my = center(y)
    H = np.array([[1.,0,4-(mx-9.5)],[0,1,4-(my-9.5)]])
    return cv.warpAffine(y,H,(28,28))

# + hidden=true
# Find digit blobs in the inverted binary image and keep only regions
# larger than 10x10 pixels to reject noise.
contours,_ = cv.findContours(255-gt, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2:]
regions = [ boundingBox(x.reshape(-1,2)) for x in contours ]
raw = [ 255-gt[y1:y2,x1:x2] for (x1,y1),(x2,y2) in regions if x2-x1 > 10 and y2-y1 > 10]
ok = [ adaptsize(x) for x in raw ]

# + hidden=true
plt.imshow(-ok[3],'gray');

# + [markdown] hidden=true
# Once this is done, they can be used with the classifier just like before:

# + hidden=true
dig = ok[1].flatten()
shdig(dig)
maq.predict(transformer.transform(dig.reshape(1,-1)))

# + hidden=true
digits = np.array(ok).reshape(-1,28*28)
plt.imshow(-np.hstack([x.reshape(28,28) for x in ok]),'gray');
plt.axis('off');
maq.predict(transformer.transform(digits))

# + [markdown] heading_collapsed=true
# ## Validity of the Gaussian model

# + [markdown] hidden=true
# If the Gaussian model of the class distribution is correct, we should be
# able to generate realistic synthetic samples.

# + [markdown] heading_collapsed=true hidden=true
# ### Synthetic samples

# + hidden=true
# Two equivalent ways to draw correlated Gaussian samples: directly, or
# by coloring white noise with the Cholesky factor of the covariance.
C = np.array([[4,-3],[-3,5]])

if False:
    kk = np.random.multivariate_normal((0,0),C,1000)
else:
    CC = np.linalg.cholesky(C)
    # careful: transform standard normal samples with the Cholesky factor
    kk = np.random.randn(1000,2) @ CC.T

plt.figure(figsize=(4,4))
plt.plot(*kk.T,'.');
plt.axis('equal');
print(np.mean(kk,axis=0))
print(np.cov(kk.T))

# + hidden=true
from sklearn import decomposition

# + hidden=true
# Model the class "3" in a 5-dimensional PCA subspace.
selected = xl[cl==3]
pca = decomposition.PCA(n_components=5)
pca.fit(selected)
#pca.fit(xl)
tr = pca.transform(selected)

# + hidden=true
k = 5
plt.figure(figsize=(8,4))
plt.subplot(121)
shdig(selected[k])
plt.axis('off');
plt.subplot(122)
shdig(pca.inverse_transform(tr[[k]])[0])
plt.axis('off');

# + hidden=true
# NOTE: M and C are rebound here to the mean/covariance in PCA space.
M = np.mean(tr,axis=0)
C = np.cov(tr.T)

# + hidden=true
# Sample from the fitted Gaussian and map back to pixel space.
plt.figure(figsize=(12,4))
plt.imshow(1-np.bmat([[ pca.inverse_transform([np.random.multivariate_normal(M,C)])[0].reshape(28,28) for _ in range(11)]]),'gray',vmin=0,vmax=1);
plt.axis('off');

# + [markdown] hidden=true
# Another possibility is a
# [QQ plot](https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot) to graphically
# compare the distribution of Mahalanobis distances, which is chi squared.

# + [markdown] hidden=true
# Test case with a true Gaussian:

# + hidden=true
from scipy.stats import chi2

# Sum of df squared standard normals is chi-squared with df degrees of freedom.
df = 10
data = np.sum(np.random.randn(1000,df)**2,axis=1)
rv = chi2(df)
x = sorted(data)
n = len(x)
y = np.linspace(1/n,1,n)
y = np.arange(n)/n
plt.figure(figsize=(12,12))
plt.subplot(221)
plt.hist(data,bins=20,edgecolor='black',density=True);
X = np.linspace(min(data),max(data),50)
plt.plot(X,rv.pdf(X));
plt.subplot(222)
plt.plot(x, rv.cdf(x), lw=7,color='gray');
plt.plot(x,y,color='black');
plt.subplot(223)
plt.plot(y,rv.cdf(x));
plt.plot([0,1],[0,1],'gray',lw=5,alpha=0.3)
plt.axis('equal');
plt.title('PP Plot')
plt.subplot(224)
plt.plot(x, rv.ppf(y))
mn = np.min(x)
mx = np.max(x)
plt.plot([mn,mx],[mn,mx],'gray',lw=5,alpha=0.3)
plt.axis('equal');
plt.title('QQ Plot');
#print(mn,mx)

# + [markdown] hidden=true
# With the selected digits:

# + hidden=true
def distMah2(m,ic,v):
    """Squared Mahalanobis distance of v from mean m with inverse covariance ic."""
    return (v-m) @ ic @ (v-m)

def dm(m,c):
    """Return a function computing squared Mahalanobis distance for mean m, covariance c."""
    ic = np.linalg.inv(c)
    return lambda v: distMah2(m,ic,v)

# + hidden=true
d = dm(M,C)

# + hidden=true
# Same PP/QQ diagnostic, now on the real digit projections.
# NOTE: `df` is rebound from the DataFrame-free test above to the subspace dimension.
data = [d(x) for x in tr]
df = len(M)
rv = chi2(df)
x = sorted(data)
n = len(x)
y = np.linspace(1/n,1,n)
y = np.arange(n)/n
plt.figure(figsize=(12,12))
plt.subplot(221)
plt.hist(data,bins=20,edgecolor='black',density=True);
X = np.linspace(min(data),max(data),50)
plt.plot(X,rv.pdf(X));
plt.subplot(222)
plt.plot(x, rv.cdf(x), lw=7,color='gray');
plt.plot(x,y,color='black');
plt.subplot(223)
plt.plot(y,rv.cdf(x));
plt.plot([0,1],[0,1],'gray',lw=5,alpha=0.3)
plt.axis('equal');
plt.title('PP Plot')
plt.subplot(224)
plt.plot(x, rv.ppf(y))
mn = np.min(x)
mx = np.max(x)
plt.plot([mn,mx],[mn,mx],'gray',lw=5,alpha=0.3)
plt.axis('equal');
plt.title('QQ Plot');
#print(mn,mx)

# + [markdown] hidden=true
# It is not exactly normal. Even so, if the clouds are not heavily
# overlapped the classifier will behave well.
# + [markdown] heading_collapsed=true hidden=true
# ### Extreme objects

# + hidden=true
# The sample with the largest Mahalanobis distance is the "weirdest" 3.
raro=np.argmax(data)
shdig(selected[raro])

# + hidden=true
# All samples sorted by Mahalanobis distance (most atypical last).
raros = sorted(range(len(selected)),key=lambda k:d(tr[k]))

# + hidden=true
plt.figure(figsize=(12,4))
plt.imshow(1-np.bmat([[ selected[raros[-k]].reshape(28,28) for k in range(1,11)]]),'gray',vmin=0,vmax=1);
plt.axis('off');

# + [markdown] heading_collapsed=true
# ## Regularization

# + [markdown] hidden=true
# To achieve **generalization** it is necessary to control the capacity of
# the learning machines.

# + [markdown] hidden=true
# We illustrate this principle with a linear machine. We select two classes
# and set the desired outputs of the machine to +1 and -1:

# + hidden=true
n = 100
ca = 4
cb = 9

# select the positions of the two classes of interest
sel_l = (cl == ca) | (cl==cb)
sel_t = (ct == ca) | (ct==cb)

# extract those positions; only the first n examples are used for training
xsl = xl[sel_l][:n]
ysl = cl[sel_l].astype(int)[:n]
# set the desired outputs, positive or negative
ysl[ysl==ca] = 1
ysl[ysl==cb] = -1

# and the same for the test selection (independent evaluation)
xst = xt[sel_t]
yst = ct[sel_t].astype(int)
yst[yst==ca] = 1
yst[yst==cb] = -1

# + hidden=true
np.sum(sel_l)

# + hidden=true
def shdig(v):
    """Show a flat 784-vector as an inverted 28x28 grayscale digit image."""
    x = np.reshape(v,[28,28])
    plt.imshow(1-x, 'gray', vmin=0, vmax=1, interpolation="nearest");

# + hidden=true
k1,k2 = 55, 56
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
shdig(xsl[k1])
plt.title(ysl[k1])
plt.subplot(1,2,2)
shdig(xsl[k2])
plt.title(ysl[k2]);

# + hidden=true
xsl.shape

# + hidden=true
yst

# + [markdown] hidden=true
# convenient helper to add the bias (offset) term to a linear machine

# + hidden=true
def homog(x):
    """Append a column of ones to x (homogeneous coordinates for the bias term)."""
    r,c = x.shape
    return np.hstack([x, np.ones([r,1])])

# + [markdown] hidden=true
# least-squares solution of a linear system

# + [markdown] hidden=true
# We want $W$ such that `xsl @ w = ysel`
#
# That is, solve $X w= y$
#
# We use `lstsq` from the linear algebra module `numpy.linalg`, which
# obtains the minimum squared error solution of a system (see the
# [systems of equations](sistecs.ipynb) notebook).

# + [markdown] hidden=true
# `lstsq` is not ideal for showing this effect in the unregularized case,
# because for underdetermined systems it returns the minimum-norm solution
# and therefore it also regularizes.

# + hidden=true
W,_,_,_ = la.lstsq(homog(xsl),ysl)

# + hidden=true
#W

# + hidden=true
#homog(xsl) @ W

# + hidden=true
#np.sign(homog(xsl) @ W) == np.sign(ysl)

# + [markdown] hidden=true
# we count the hits

# + hidden=true
np.sum(np.sign(homog(xsl) @ W) == np.sign(ysl)), len(ysl)

# + [markdown] hidden=true
# Looks good: it gets every training example right.

# + hidden=true
np.sign(homog(xst) @ W) == np.sign(yst)

# + hidden=true
np.sum(np.sign(homog(xst) @ W) == np.sign(yst)), len(yst)

# + hidden=true
k1,k2 = 55, 56
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
shdig(xsl[k1])
plt.title((homog(xsl) @ W)[k1])
plt.subplot(1,2,2)
shdig(xsl[k2])
plt.title((homog(xsl) @ W)[k2]);

# + [markdown] hidden=true
# It reproduces the desired values $\pm 1$ exactly, since it has more
# degrees of freedom (adjustable coefficients) than constraints (equations,
# number of training examples). This inspires little confidence in its
# behavior on unseen examples:

# + hidden=true
k1,k2 = 70, 55
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
shdig(xst[k1])
plt.title((homog(xst) @ W)[k1])
plt.subplot(1,2,2)
shdig(xst[k2])
plt.title((homog(xst) @ W)[k2]);

# + [markdown] hidden=true
# We now build a regularized solution, which penalizes the size of the
# coefficients with a weight $\lambda$, so that the interpolation of
# irrelevant details is reduced.

# + [markdown] hidden=true
# The regularized solution is very similar to least squares, but the
# covariance $X^TX$ must be "inflated" with $\lambda$. Instead of
#
# $w = (X^T X) ^{-1} X^T y$
#
# (which is what lstsq does internally: the "pseudoinverse" of X, times y)
#
# we do
#
# $w = (X^T X + \lambda I) ^{-1} X^T y$

# + hidden=true
lam = 2E2
D = np.diag(lam*np.ones([784+1]))
D[-1,-1] = 0
# the b coefficient is not regularized, because the hyperplane may sit
# anywhere; we should not push it towards the origin

# + hidden=true
#D

# + hidden=true
xh = homog(xsl)
Wr = la.solve(xh.T @ xh + D, xh.T @ ysl)

# + hidden=true
np.sum(np.sign(homog(xsl) @ Wr) == np.sign(ysl)), len(ysl)

# + hidden=true
np.sum(np.sign(homog(xst) @ Wr) == np.sign(yst)), len(yst)

# + [markdown] hidden=true
# **Exercise**: create a curve comparing $E_L$ with $E_T$ for increasing
# values of $\lambda$.

# + hidden=true
Lam = [0.01, 0.1, 1, 5, 10, 50, 100, 200, 500, 1000, 2000, 3000, 5000]

def regu():
    """Training and test error rates (fractions) for each lambda in Lam."""
    xh = homog(xsl)
    L = []
    T = []
    for l in Lam:
        # FIX: removed a dead `lam = 2E2` assignment left over from the
        # copy-paste above — the loop variable `l` is what regularizes here.
        D = np.diag(l*np.ones([784+1]))
        D[-1,-1] = 0  # the bias coefficient is not regularized
        Wr = la.solve(xh.T @ xh + D, xh.T @ ysl)
        EL = np.sum(np.sign(homog(xsl) @ Wr) == np.sign(ysl)), len(ysl)
        ET = np.sum(np.sign(homog(xst) @ Wr) == np.sign(yst)), len(yst)
        L.append(EL[0]/EL[1])
        T.append(ET[0]/ET[1])
    return 1-np.array(L), 1-np.array(T)

# + hidden=true
plt.figure(figsize=(8,6))
l,t = regu()
plt.plot(100*l,'o-',label='training',color='red')
plt.plot(100*t,'o-',label='test',color='green')
# FIX: Lam has 13 entries but np.arange(12) supplied only 12 tick
# positions — a tick/label count mismatch that matplotlib rejects.
plt.xticks(np.arange(len(Lam)), Lam, rotation=45)
plt.legend()
plt.xlabel('$\lambda$');
plt.ylabel('error %')
plt.title('Regularization');

# + [markdown] hidden=true
# This plot illustrates the fundamental theoretical principle of *machine
# learning*: **generalization** is related to the **capacity** of the machine.

# + [markdown] heading_collapsed=true
# ## *Adversarial examples*

# + [markdown] hidden=true
# It is possible to synthesize apparently innocent instances that confuse
# the classifier.
# + [markdown] heading_collapsed=true hidden=true # ### Gaussian classifier # + hidden=true from sklearn import decomposition, discriminant_analysis def acc(maq,x,y): return 100*(y == maq.predict(x)).sum() / len(y) # + hidden=true transformer = decomposition.PCA(n_components=40).fit(xl) # + hidden=true xrl = transformer.transform(xl) xrt = transformer.transform(xt) # + [markdown] hidden=true # Un clasificador "naive Bayes" tiene más de un 12% de errores, mientras que el gaussiano completo consigue menos de 4%: # + hidden=true maq = discriminant_analysis.QuadraticDiscriminantAnalysis(store_covariance=True).fit(xrl,cl) # + hidden=true acc(maq,xrt,ct) # + [markdown] heading_collapsed=true hidden=true # ### Adversarial examples # + hidden=true def mkg(transformer,maquina,cl,v): d0 = transformer.transform([v])[0] - maquina.means_[cl] d1 = np.linalg.inv(maquina.covariance_[cl]) @ d0 d2 = transformer.inverse_transform(d1) return d2 # + hidden=true cdesired = 5 k = 1234 v0 = xt[k] v = v0 corig = ct[k] shdig(v0); plt.title(corig); redu = transformer.transform([v]) maq.predict_proba(redu)[0][[cdesired,corig]] # + hidden=true for _ in range(10): g = mkg(transformer, maq, corig, v) - mkg(transformer, maq, cdesired, v) v = np.clip(v + 0.01*g, 0, 1) redu = transformer.transform([v]) cp = maq.predict(redu)[0] if cp != corig: break shdig(v) plt.title(cp) maq.predict_proba(redu)[0][[cdesired,corig]] # + hidden=true shdig(abs(v-v0)) print(np.sum(abs(v-v0))) # + [markdown] heading_collapsed=true hidden=true # ### Random inputs # + hidden=true v0 = np.random.rand(28,28).flatten() shdig(v0) v = v0 redu = transformer.transform([v]) plt.title(maq.predict(redu)[0]); maq.predict_proba(redu)[0].max() # + hidden=true cdesired = 0 for _ in range(3): g = - mkg(transformer, maq, cdesired, v) v = np.clip(v + 0.01*g, 0, 1) redu = transformer.transform([v]) cp = maq.predict(redu)[0] shdig(v) plt.title(cp) maq.predict_proba(redu)[0][cdesired] # + hidden=true maq.predict_proba(redu)[0] # + 
hidden=true shdig(abs(v-v0)) print(np.sum(abs(v-v0))) # + [markdown] heading_collapsed=true # ## Otras máquinas de aprendizaje # + [markdown] heading_collapsed=true hidden=true # ### Naive Bayes # + hidden=true from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() maq = gnb.fit(xl, cl) # + hidden=true acc(maq,xt,ct) # + hidden=true maq.predict(digits) # + hidden=true maq.sigma_ = maq.sigma_ * 0 + 1 # + hidden=true acc(maq,xt,ct) # + hidden=true maq.predict(digits) # + [markdown] heading_collapsed=true hidden=true # ### Support vector machine (SVM) # + hidden=true from sklearn import svm classifier = svm.SVC(gamma=0.01, C=0.1) #classifier = svm.SVC(gamma=0.001) classifier.kernel # + hidden=true maq = classifier.fit(xl[:5000], cl[:5000]) maq.support_vectors_.shape # + hidden=true acc(maq,xt,ct) # + hidden=true maq.predict(digits) # + hidden=true #import pickle #s = pickle.dumps(maq) # + hidden=true #from sklearn.externals import joblib #joblib.dump(maq, 'svm.pkl') # + hidden=true #maq = joblib.load('svm.pkl') # + [markdown] heading_collapsed=true hidden=true # ### Gradient Boosting # + hidden=true from sklearn import ensemble # + hidden=true clf = ensemble.GradientBoostingClassifier(subsample=0.1, n_estimators=50, max_features=50, min_samples_split=10) clf.fit(xl, cl) # + hidden=true clf.score(xl,cl), clf.score(xt,ct) # + [markdown] heading_collapsed=true hidden=true # ### Random Forest # + hidden=true clf = ensemble.RandomForestClassifier(n_estimators=100,n_jobs=-1) clf.fit(xl, cl) # + hidden=true clf.score(xl,cl), clf.score(xt,ct) # + [markdown] heading_collapsed=true hidden=true # ### CNN # + [markdown] hidden=true # Red convolucional profunda (ver [deep learning](tensorflow.ipynb)). 
# + hidden=true from keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Softmax, Flatten model = Sequential() model.add(Conv2D(input_shape=(28,28,1), filters=32, kernel_size=(5,5), strides=1, padding='same', use_bias=True, activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Conv2D(filters=64, kernel_size=(5,5), strides=1, padding='same', use_bias=True, activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Flatten()) model.add(Dense(1024)) model.add(Dropout(rate=0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # + hidden=true if False: model.fit(xl.reshape(-1,28,28,1), yl, epochs=50, batch_size=500) #model.save('digits.keras') else: #wget https://robot.inf.um.es/material/va/digits.keras model.load_weights('../data/models/digits.keras') model.evaluate(xt.reshape(-1,28,28,1),yt, batch_size=500) # + hidden=true plt.imshow(-np.hstack([x.reshape(28,28) for x in ok]),'gray'); plt.axis('off'); model.predict_classes(np.array(ok).reshape(-1,28,28,1))
notebooks/machine-learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Synthetic 3-class, 2-feature dataset for the demo.
X, y = datasets.make_classification(n_samples = 2000, n_features = 2, n_informative = 2, n_repeated = 0, n_redundant = 0, n_classes = 3, n_clusters_per_class = 1, class_sep = 0.9, random_state = 8)

plt.figure(figsize = (12, 8))
plt.scatter(X[:, 0], X[:, 1], c = y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)


def prepare_matrices(x, y):
    '''
    This function prepares x and y matrices with appropriate shapes.

    Arguments:
    x: Input Feature Matrix (numpy array of shape (m, n), m = number of examples, n = number of features)
    y: Output Vector / Target Vector (numpy array of shape (m,) or (m, 1) holding class indices 0..c-1)

    Returns:
    x_new: Processed feature Matrix (numpy array of shape (n, m))
    y_new: Processed output matrix (numpy array of shape (c, m)), c = number of classes,
           m = number of examples. y_new[:, i] is a one-hot vector in which the index of
           value 1 corresponds to the class of that instance of data.
    '''
    m = x.shape[0]
    new_x = x.T
    n_class = np.unique(y).shape[0]
    # One-hot encode in a single vectorized step. Unlike the original
    # hard-coded if/elif over exactly 3 classes, np.eye(n_class)[labels]
    # generalizes to any number of classes.
    new_y = np.eye(n_class)[np.asarray(y).reshape(-1).astype(int)].T

    assert(new_y.shape == (n_class, m))
    return new_x, new_y


def initialize_parameters(n, c, method = 'zeros'):
    '''
    This function initializes parameter matrix W and bias vector b.

    Arguments:
    n: Number of features
    c: Number of classes
    method: 'zeros' / 'random'. The method used to initialize the parameter matrix.
            (default: 'zeros', i.e. initialization with zeros)

    Returns:
    W: Parameter matrix of shape (c, n), n = number of features, c = number of classes
    b: Bias vector of shape (c, 1)
    '''
    if method == 'zeros':
        W = np.zeros(shape = (c, n))
        b = np.zeros(shape = (c, 1))
    elif method == 'random':
        W = np.random.rand(c, n)
        b = np.random.rand(c, 1)
    else:
        # Fail loudly instead of the UnboundLocalError the original would raise.
        raise ValueError("method must be 'zeros' or 'random', got {!r}".format(method))

    assert(W.shape == (c, n))
    assert(b.shape == (c, 1))

    return W, b


def softmax(z):
    '''
    This function calculates the softmax of the input array.

    Arguments:
    z: Input array. For a 1-D input the softmax of the whole vector is returned;
       for a 2-D input the softmax is taken column-wise (axis 0).

    Returns:
    z_softmax: Softmax of the input array, same shape as the input.
    '''
    # Subtracting the column-wise max before exponentiating is the standard
    # numerical-stability trick; softmax is invariant to a constant shift.
    expz = np.exp(z - np.max(z, axis = 0, keepdims = True))
    return expz / np.sum(expz, axis = 0, keepdims = True)


def forward_prop(parameters, x):
    '''
    This function performs forward propagation on the network (the network for this
    implementation only includes a softmax layer).

    Arguments:
    parameters: a tuple (W, b), where W is a parameter matrix of shape (c, n) and
                b is a bias matrix of shape (c, 1)
    x: Feature matrix of shape (n, m), m = number of training examples, n = number of features

    Returns:
    y_hat: Calculated hypothesis / output matrix of shape (c, m)
    '''
    W, b = parameters
    z = np.dot(W, x) + b
    # Softmax is applied column-wise in one vectorized call instead of a
    # Python loop over the m examples.
    y_hat = softmax(z)
    return y_hat


def backword_prop(y_hat, y, x):
    '''
    This function performs backward propagation on the network (the network for this
    implementation only includes a softmax layer).
    NOTE: the (misspelled) name is kept for backward compatibility with callers.

    Arguments:
    y_hat: Hypothesis matrix of shape (c, m), c = number of classes, m = number of training examples
    y: Output matrix of shape (c, m)
    x: Feature Matrix (numpy array of shape (n, m))

    Returns:
    dW: Derivative of parameters W (numpy array of shape (c, n))
    db: Derivative of bias b (numpy array of shape (c, 1))
    '''
    m = x.shape[1]
    n = x.shape[0]
    c = y.shape[0]

    # For softmax + cross-entropy, dL/dz simplifies to (y_hat - y).
    dW = np.matmul((y_hat - y), x.T) / m
    db = np.sum((y_hat - y), axis = 1, keepdims = True) / m

    assert(dW.shape == (c, n))
    assert(db.shape == (c, 1))

    return dW, db


def cost_calc(y_hat, y):
    '''
    This function calculates the cross entropy loss / cost of the classifier.

    Arguments:
    y_hat: Hypothesis matrix of shape (c, m), c = number of classes, m = number of training examples
    y: Output matrix of shape (c, m)

    Returns:
    cost: Cross entropy loss or cost
    '''
    m = y.shape[1]
    # The tiny epsilon guards against log(0) for confidently wrong predictions.
    cost = -np.sum(np.multiply(y, np.log(y_hat + 1e-10))) / m
    return cost


def convergence_check(costs, epsilon):
    '''
    This function checks convergence of the gradient descent algorithm.

    Arguments:
    costs: A list containing cost values of the current and previous iterations
    epsilon: Threshold of the squared difference between costs of consecutive
             iterations used to decide convergence

    Returns:
    Boolean (True / False) value of whether the algorithm has converged
    '''
    error = (costs[0] - costs[1]) ** 2
    return error < epsilon


def softmax_regression(X, y, num_iterations = 50000, algo_type = 'batch', learning_rate = 0.1, epsilon = 1e-05, verbose = True, initialization = 'zeros'):
    '''
    This function performs softmax regression using the gradient descent algorithm
    for minimising cost.

    Arguments:
    X: Feature Matrix (numpy array of shape (m, n), m = number of examples, n = number of features)
    y: Output Vector / Target Vector (numpy array of shape (m,) or (m, 1))
    num_iterations (optional): Max number of iterations (default value: 50000)
                               (if convergence is achieved before this number, the algorithm stops)
    algo_type: 'batch' / 'stochastic' for batch / stochastic gradient descent
    learning_rate (optional): Value for learning rate (default value: 0.1)
    epsilon (optional): Convergence threshold (default value: 1e-05)
    verbose (optional): Whether to print progress every 100 iterations
    initialization (optional): 'zeros' / 'random', method used to initialize parameters

    Returns:
    W: Parameter matrix of shape (c, n)
    b: Bias matrix of shape (c, 1)
    costs: A dictionary with learning rate as key and a list of costs for every
           100th iteration as value
    '''
    X_processed, y_processed = prepare_matrices(X, y)
    n = X_processed.shape[0]
    c = y_processed.shape[0]
    W, b = initialize_parameters(n, c, method = initialization)

    y_hat = forward_prop((W, b), X_processed)
    cost = cost_calc(y_hat, y_processed)
    if verbose:
        print('Initial Cost:{}'.format(cost))

    costs = {}
    costs_list = [cost]

    if algo_type == 'batch':
        for i in range(num_iterations):
            y_hat = forward_prop((W, b), X_processed)
            dW, db = backword_prop(y_hat, y_processed, X_processed)
            cost = cost_calc(y_hat, y_processed)
            W = W - (learning_rate * dW)
            b = b - (learning_rate * db)
            if verbose and ((i + 1) % 100) == 0:
                print('Iteration:{}, Cost:{}'.format((i + 1), cost))
            # Record the cost (and test convergence) once every 100 iterations.
            if ((i + 1) % 100) == 0:
                costs_list.append(cost)
                if len(costs_list) >= 2 and convergence_check(costs_list[-2:], epsilon):
                    print('Algorithm has converged!!')
                    break
        costs[learning_rate] = costs_list
    elif algo_type == 'stochastic':
        y_hat = forward_prop((W, b), X_processed)
        for i in range(num_iterations):
            # BUGFIX: the original used `i % (len(y) - 1)`, which never visits
            # the last training example and wraps one step early.
            index = i % len(y)
            W = W - learning_rate * np.matmul((y_hat[:, index].reshape((c, 1)) - y_processed[:, index].reshape((c, 1))), X_processed[:, index].reshape((1, n)))
            b = b - learning_rate * (y_hat[:, index].reshape((c, 1)) - y_processed[:, index].reshape((c, 1)))
            y_hat = forward_prop((W, b), X_processed)
            cost = cost_calc(y_hat, y_processed)
            if verbose and ((i + 1) % 100) == 0:
                print('Iteration:{}, Cost:{}'.format((i + 1), cost))
            if ((i + 1) % 100) == 0:
                costs_list.append(cost)
                if len(costs_list) >= 2 and convergence_check(costs_list[-2:], epsilon):
                    print('Algorithm has converged')
                    break
        costs[learning_rate] = costs_list
    return W, b, costs


def accuracy_calc(W, b, X, y):
    '''
    This function calculates accuracy of classification and the predicted labels
    using the parameters and the feature matrix.

    Arguments:
    W: Parameter matrix of shape (c, n), n = number of features, c = number of classes
    b: Bias matrix of shape (c, 1)
    X: Feature Matrix (numpy array of shape (m, n), m = number of examples, n = number of features)
    y: Output Vector / Target Vector (numpy array of shape (m,) or (m, 1))

    Returns:
    y_pred: Predicted label vector of shape (m,); each value is the predicted class
    accuracy: Accuracy of classification in percentage
    '''
    x, _ = prepare_matrices(X, y)
    m = x.shape[1]
    y_hat = forward_prop((W, b), x)
    y_pred = np.argmax(y_hat, axis = 0)
    accuracy = np.round(np.count_nonzero(y == y_pred) / m, 2) * 100
    # BUGFIX: the original returned the ground-truth `y` instead of the
    # predictions, contradicting its own docstring. Callers that unpack
    # `_, accuracy` are unaffected.
    return y_pred, accuracy


# Training of the model:
W, b, costs = softmax_regression(X_train, y_train, algo_type = 'batch', initialization = 'random')

# Plot learning curve
plt.figure(figsize = (12, 8))
learning_rate = list(costs.keys())[0]
plt.plot(list(costs.values())[0], label = 'Learning Rate:{}'.format(learning_rate))
plt.xlabel('Number of iterations (100s)')
plt.ylabel('Cost')
plt.title('Learning Curve')
plt.legend()

# Calculate train accuracy:
_, train_accuracy = accuracy_calc(W, b, X_train, y_train)
print('Training Set Accuracy is:{}'.format(train_accuracy))

# Calculate test accuracy:
_, test_accuracy = accuracy_calc(W, b, X_test, y_test)
print('Test Set Accuracy is:{}'.format(test_accuracy))
Softmax Regression/Softmax regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py37
#     language: python
#     name: py37
# ---

import numpy as np
import tensorflow as tf

# +
# Statically-unrolled RNN demo (TF1 graph mode): 2 time steps, 3 features,
# 5 recurrent units.
n_steps = 2
n_inputs = 3
n_neurons = 5

# static_rnn expects a Python list of per-time-step tensors, so swap the
# batch and time axes and unstack along the (new) leading time axis.
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
per_step_inputs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)
per_step_outputs, states = tf.nn.static_rnn(cell, per_step_inputs, dtype=tf.float32)
# Stack the per-step outputs back into a (batch, time, units) tensor.
outputs = tf.transpose(tf.stack(per_step_outputs), perm=[1, 0, 2])
# -

init = tf.global_variables_initializer()

# +
# Four instances, each with two time steps of three features.
X_batch = np.array([
        # t = 0      t = 1
        [[0, 1, 2], [9, 8, 7]], # instance 1
        [[3, 4, 5], [0, 0, 0]], # instance 2
        [[6, 7, 8], [6, 5, 4]], # instance 3
        [[9, 0, 1], [3, 2, 1]], # instance 4
    ])

with tf.Session() as sess:
    init.run()
    outputs_val = outputs.eval(feed_dict={X: X_batch})
# -

print(outputs_val)
14_tensorflow_rnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data generators # # # In Python, a generator is a function that behaves like an iterator. It will return the next item. Here is a [link](https://wiki.python.org/moin/Generators) to review python generators. In many AI applications, it is advantageous to have a data generator to handle loading and transforming data for different applications. # # You will now implement a custom data generator, using a common pattern that you will use during all assignments of this course. # In the following example, we use a set of samples `a`, to derive a new set of samples, with more elements than the original set. # # **Note: Pay attention to the use of list `lines_index` and variable `index` to traverse the original list.** # + import random import numpy as np # Example of traversing a list of indexes to create a circular list a = [1, 2, 3, 4] b = [0] * 10 a_size = len(a) b_size = len(b) lines_index = [*range(a_size)] # is equivalent to [i for i in range(0,a_size)], the difference being the advantage of using * to pass values of range iterator to list directly index = 0 # similar to index in data_generator below for i in range(b_size): # `b` is longer than `a` forcing a wrap # We wrap by resetting index to 0 so the sequences circle back at the end to point to the first index if index >= a_size: index = 0 b[i] = a[lines_index[index]] # `indexes_list[index]` point to a index of a. Store the result in b index += 1 print(b) # - # ## Shuffling the data order # # In the next example, we will do the same as before, but shuffling the order of the elements in the output list. Note that here, our strategy of traversing using `lines_index` and `index` becomes very important, because we can simulate a shuffle in the input data, without doing that in reality. 
# # + # Example of traversing a list of indexes to create a circular list a = [1, 2, 3, 4] b = [] a_size = len(a) b_size = 10 lines_index = [*range(a_size)] print("Original order of index:",lines_index) # if we shuffle the index_list we can change the order of our circular list # without modifying the order or our original data random.shuffle(lines_index) # Shuffle the order print("Shuffled order of index:",lines_index) print("New value order for first batch:",[a[index] for index in lines_index]) batch_counter = 1 index = 0 # similar to index in data_generator below for i in range(b_size): # `b` is longer than `a` forcing a wrap # We wrap by resetting index to 0 if index >= a_size: index = 0 batch_counter += 1 random.shuffle(lines_index) # Re-shuffle the order print("\nShuffled Indexes for Batch No.{} :{}".format(batch_counter,lines_index)) print("Values for Batch No.{} :{}".format(batch_counter,[a[index] for index in lines_index])) b.append(a[lines_index[index]]) # `indexes_list[index]` point to a index of a. Store the result in b index += 1 print() print("Final value of b:",b) # - # **Note: We call an epoch each time that an algorithm passes over all the training examples. Shuffling the examples for each epoch is known to reduce variance, making the models more general and overfit less.** # # # # # ### Exercise # # **Instructions:** Implement a data generator function that takes in `batch_size, x, y shuffle` where x could be a large list of samples, and y is a list of the tags associated with those samples. Return a subset of those inputs in a tuple of two arrays `(X,Y)`. Each is an array of dimension (`batch_size`). If `shuffle=True`, the data will be traversed in a random form. # # **Details:** # # This code as an outer loop # ``` # while True: # ... # yield((X,Y)) # ``` # # Which runs continuously in the fashion of generators, pausing when yielding the next values. We will generate a batch_size output on each pass of this loop. 
#
# It has an inner loop that stores in temporary lists (X, Y) the data samples to be included in the next batch.
#
# There are three slightly out-of-the-ordinary features.
#
# 1. The first is the use of a list of a predefined size to store the data for each batch. Using a predefined-size list reduces the computation time if the elements in the array are of a fixed size, like numbers. If the elements are of different sizes, it is better to use an empty list and append one element at a time during the loop.
#
# 2. The second is tracking the current location in the incoming lists of samples. Generator variables hold their values between invocations, so we create an `index` variable, initialized to zero, and increment it by one for each sample included in a batch. However, we do not use `index` to access the positions of the list of samples directly. Instead, we use it to select one entry from a list of indexes. In this way, we can change the order in which we traverse the original list, keeping the original list untouched.
#
# 3. The third also relates to wrapping. Because `batch_size` and the length of the input lists are not aligned, gathering a batch_size group of inputs may involve wrapping back to the beginning of the input list. In our approach, it is enough to reset `index` to 0. We can re-shuffle the list of indexes to produce different batches each time.
def data_generator(batch_size, data_x, data_y, shuffle=True):
    '''
      Yield successive (X, Y) batches, cycling over the data indefinitely.

      Input:
        batch_size - integer describing the batch size
        data_x - list containing samples
        data_y - list containing labels
        shuffle - Shuffle the data order
      Output:
        a tuple containing 2 elements:
        X - list of dim (batch_size) of samples
        Y - list of dim (batch_size) of labels
    '''
    data_lng = len(data_x) # len(data_x) must be equal to len(data_y)
    index_list = [*range(data_lng)] # Create a list with the ordered indexes of sample data

    # If shuffle is set to true, we traverse the list in a random way
    if shuffle:
        random.shuffle(index_list) # Inplace shuffle of the list

    index = 0 # Start with the first element
    while True:
        # Pre-sized batch buffers (fixed-size elements, so this is cheap).
        X = [0] * batch_size
        Y = [0] * batch_size
        for i in range(batch_size):
            # Wrap the index each time that we reach the end of the list
            if index >= data_lng:
                index = 0
                # Re-shuffle the traversal order at each wrap (new "epoch")
                if shuffle:
                    random.shuffle(index_list)
            # index_list[index] points to a position in the original data;
            # the original lists are never reordered.
            X[i] = data_x[index_list[index]]
            Y[i] = data_y[index_list[index]]
            index += 1
        yield((X, Y))

# If your function is correct, all the tests must pass.

# +
def test_data_generator():
    x = [1, 2, 3, 4]
    y = [xi ** 2 for xi in x]

    generator = data_generator(3, x, y, shuffle=False)

    assert np.allclose(next(generator), ([1, 2, 3], [1, 4, 9])),  "First batch does not match"
    assert np.allclose(next(generator), ([4, 1, 2], [16, 1, 4])), "Second batch does not match"
    assert np.allclose(next(generator), ([3, 4, 1], [9, 16, 1])), "Third batch does not match"
    assert np.allclose(next(generator), ([2, 3, 4], [4, 9, 16])), "Fourth batch does not match"
    print("\033[92mAll tests passed!")

test_data_generator()
# -

# If you could not solve the exercise, just run the next code to see the answer.
# + import base64 solution = "<KEY>" # Print the solution to the given assignment print(base64.b64decode(solution).decode("utf-8")) # - # ### Hope you enjoyed this tutorial on data generators which will help you with the assignments in this course.
Natural Language Processing with Sequence Models/Week 1 - Neural Netowrks for Sentiment Analysis/NLP_C3_W1_lecture_nb_03_data_generatos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# In this file we are talking about how to deal with loading of datasets
# These Datasets are widely used in machine learning
# we are using pandas

# importing the libraries (moved ahead of the code that uses them)
import numpy as np
import pandas as pd

# --- Manual loading: build the dataset row by row from the raw CSV text ---
X = []
# Use a context manager so the file handle is closed even if parsing fails
# (the original left the handle open).
with open('data_2d.csv') as f:
    for line in f:
        row = line.split(',')
        # the split function separates the text values:
        # it finds commas and after each comma it considers the next value
        # as an element, until it finds the next comma
        sample = list(map(float, row))
        # the map function returns the data after applying the first argument
        # to each element in the list; in this case it is type casting each
        # element as float. list() then materializes the result.
        X.append(sample)
        # now we have the data in a format on which mathematical operations
        # can be done, so we append each row into our main list

X = np.array(X) # converting list into a numpy array
print(X.shape) # gives information about the dimensions of the array (rows, colmns)
print(X)

# this method which is displayed above is an example of loading the data manually
# that is loading the data without pandas

# Now we are going to use pandas, which makes loading and working on datasets effortless
X = pd.read_csv("data_2d.csv", header=None) # pandas method to load .csv type dataset
print(X.head()) # displays the first 5 rows by default and n rows if passed as argument

# let us see the datatype of object created using pandas
print(type(X))

# +
# A Data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns.
# gives the info about the type of data in our Dataframe
X.info()
# -
PandasBasics/ManualDataLoading .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## GRASP

# +
import sys
sys.path.append('../src/')

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

import pandas as pd
import matplotlib
import matplotlib.pyplot as plt

# Project-local modules (star-imported): `alg` supplies the lpt/grasp solvers
# and `utils` supplies suboptimal_instance — TODO confirm against ../src/.
from alg import *
from utils import *

# +
# Experiment configuration: each GRASP measurement is averaged over
# `num_trials` runs, for each resource count in `resources`.
num_trials = 5
resources = [5, 10, 15, 20]

def opt_bound(R, p):
    # Lower bound on the optimal makespan derived from an LPT run.
    # NOTE(review): lpt appears to return (solution, makespan, ratio) — confirm in alg.
    _, m, r, = lpt(R, p)
    return m / r

def lpt_value(R, p):
    # Makespan achieved by the LPT heuristic.
    _, m, _, = lpt(R, p)
    return m

def makespan(R, p, cc_ratio=0.5, max_iters=1000, hc_iters=10):
    # Makespan achieved by GRASP with the given candidate-component ratio,
    # iteration budget and hill-climbing budget.
    _, m = grasp(R=R, p=p, cc_ratio=cc_ratio, max_iters=max_iters, hc_iters=hc_iters)
    return m
# -

# ## Comparison on sub-optimal instance

# ### Varying candidate component ratio

# +
def eval_subopt_lpt(R):
    # Evaluate LPT on the adversarial (sub-optimal) instance for R resources;
    # returns (optimality bound, LPT makespan).
    _, m, r, = lpt(R, p=suboptimal_instance(R))
    return m / r, m

lpt_df = pd.DataFrame(data=(eval_subopt_lpt(R) for R in resources), index=resources, columns=['opt_bound', 'lpt'])
# -

def eval_subopt_grasp(**kwargs):
    # Yield, for each resource count, the GRASP makespan on the sub-optimal
    # instance averaged over num_trials runs; kwargs are forwarded to makespan.
    for R in resources:
        yield sum(makespan(R, suboptimal_instance(R), **kwargs) for _ in range(num_trials)) / num_trials

# +
cc_ratios = [0.25, 0.5, 0.75]

grasp_df = pd.DataFrame({f'grasp ({cc_ratio})': eval_subopt_grasp(cc_ratio=cc_ratio) for cc_ratio in cc_ratios}, index=resources)
pd.concat([lpt_df, grasp_df], axis=1)
# -

# Normalize so that 0 corresponds to the optimality bound and 1 to LPT's makespan.
df = grasp_df.sub(lpt_df.opt_bound, axis=0).div(lpt_df.lpt - lpt_df.opt_bound, axis=0)
df.plot.barh(ylabel='norm. quality(GRASP)/quality(LPT)', xlabel='#resources (R)', title='Quality relative to LPT for sub-optimal instances')

# ### Exploitation effect of Hill Climbing

# +
hc_iters_choices = [0, 10, 100]

grasp_df = pd.DataFrame({f'grasp (HC-{hc_iters})': eval_subopt_grasp(hc_iters=hc_iters) for hc_iters in hc_iters_choices}, index=resources)
pd.concat([lpt_df, grasp_df], axis=1)
# -

# Same normalization as above, now across hill-climbing budgets.
df = grasp_df.sub(lpt_df.opt_bound, axis=0).div(lpt_df.lpt - lpt_df.opt_bound, axis=0)
df.plot.barh(ylabel='norm. quality(GRASP)/quality(LPT)', xlabel='#resources (R)', title='Quality relative to LPT for sub-optimal instances')

# ### Effect of increasing max. number of iterations

# +
max_iters_choices = [1_000, 2_000, 5_000]
hc_iters=100

grasp_df = pd.DataFrame(
    {
        f'grasp ({max_iters}, HC-{hc_iters})': eval_subopt_grasp(max_iters=max_iters, hc_iters=hc_iters)
        for max_iters in max_iters_choices
    },
    index=resources,
)
df = pd.concat([lpt_df, grasp_df], axis=1)
df
# -

df.plot.line(xlabel='#resources (R)', ylabel='min. makespan (mean for GRASP)')
notebooks/grasp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## BERT run model script # # The purpose of this script is to train and test the BERT models. This script is designed to be used in Google Colaboratory in the sense that: # # - it assumes data will be loaded from both Google Drive and Google Storage Buckets # - it assumes the script will be executed on a Colabotory GPU # # One training epoch on the legislative data (with batch size 25) take about 40 seconds on a Colaboratory GPU # + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" executionInfo={"elapsed": 5186, "status": "ok", "timestamp": 1564139206409, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-TnPwXZfZr68/AAAAAAAAAAI/AAAAAAAAAMA/FW6bTWy9_is/s64/photo.jpg", "userId": "04086047539127212340"}, "user_tz": -60} id="r9hzH1lke221" outputId="632bfe59-a632-4fa4-b541-f287369fcf9e" # Import dependencies # !pip install bert-tensorflow import pandas as pd import tensorflow as tf import math import tensorflow_hub as hub import sys import bert import numpy as np from bert import run_classifier from bert import optimization from datetime import datetime from google.colab import drive from sklearn.metrics import confusion_matrix # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2981, "status": "ok", "timestamp": 1564139213228, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-TnPwXZfZr68/AAAAAAAAAAI/AAAAAAAAAMA/FW6bTWy9_is/s64/photo.jpg", "userId": "04086047539127212340"}, "user_tz": -60} id="170TqhvQfO91" outputId="91708d8f-d4b4-4947-d8d5-d040bfb6d0cb" # Mount Google Drive drive.mount('/content/gdrive', force_remount=True) # + colab={"base_uri": "https://localhost:8080/", "height": 71} 
colab_type="code" executionInfo={"elapsed": 679, "status": "ok", "timestamp": 1564139216741, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-TnPwXZfZr68/AAAAAAAAAAI/AAAAAAAAAMA/FW6bTWy9_is/s64/photo.jpg", "userId": "04086047539127212340"}, "user_tz": -60} id="JNoTGgTCfZdr" outputId="6bdf0edc-0c54-4463-8fd0-eb28c6a80874" # Import local modules - if taken to production, these would be properly packaged... sys.path.insert(0, '/content/gdrive/My Drive/') # Set path to Google Drive location (location of beam_search and model modules) import beam_search import model # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 4035, "status": "ok", "timestamp": 1564139223531, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-TnPwXZfZr68/AAAAAAAAAAI/AAAAAAAAAMA/FW6bTWy9_is/s64/photo.jpg", "userId": "04086047539127212340"}, "user_tz": -60} id="mTansETAfnPY" outputId="e2461670-8435-48c8-f682-d0cd84dd31e2" # Check GPU runtime device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) from google.colab import auth auth.authenticate_user() # + colab={} colab_type="code" id="zqiJFS52f8p_" # Model parameters # Encoder (BERT loaded from tfhub) BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1" # This is a path to an uncased (all lowercase) version of BERT # Decoder params = { "num_hidden_layers":6, "hidden_size":6, "num_heads":3, "filter_size":512, "relu_dropout":0.1, "allow_ffn_pad":True, "layer_postprocess_dropout":0.1, "attention_dropout":0.1, "initializer_gain":1.0, "label_smoothing":0.1, "beam_size":4, "alpha":1, "bucket":'BUCKET_NAME', # Set Google Storage Bucket name "task_data_dir":'gs://BUCKET_NAME/', # Set Google Storage Bucket name "output_dir":'gs://{}/XXXXX/' # Set Google Storage Bucket output directory } # Compute train and warmup 
steps from batch size # These transfer learning hyperparameters are taken from the 'BERT fine-tuning' notebook # (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb) BATCH_SIZE = 25 LEARNING_RATE = 2e-5 NUM_TRAIN_EPOCHS = 10 # Warmup is a period of time where the learning rate # is small and gradually increases--usually helps training. WARMUP_PROPORTION = 0.1 # Model configs SAVE_CHECKPOINTS_STEPS = 1000 SAVE_SUMMARY_STEPS = 500 MAX_SEQ_LENGTH = 128 # This should match the max seq length set when creating the input file TRAIN_FILE = '' # Set data path VALIDATION_FILE = '' # Set data path TEST_FILE = '' # Set data path NUM_TRAINING_EXAMPLES = 750 # Set number of training examples num_train_steps = int(NUM_TRAINING_EXAMPLES/ BATCH_SIZE * NUM_TRAIN_EPOCHS) num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION) # + cellView="code" colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 5530, "status": "ok", "timestamp": 1564139293768, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-TnPwXZfZr68/AAAAAAAAAAI/AAAAAAAAAMA/FW6bTWy9_is/s64/photo.jpg", "userId": "04086047539127212340"}, "user_tz": -60} id="sElDxvNnfsPP" outputId="5316509e-0225-48fb-d7a3-887c15274508" # Set data input and output directory (these are taken from params) TASK_DATA_DIR = params['task_data_dir'] print('***** Task data directory: {} *****'.format(TASK_DATA_DIR)) # !gsutil ls $TASK_DATA_DIR BUCKET = params['bucket'] assert BUCKET, 'Must specify an existing GCS bucket name' OUTPUT_DIR = params['output_dir'].format(BUCKET) tf.gfile.MakeDirs(OUTPUT_DIR) print('***** Model output directory: {} *****'.format(OUTPUT_DIR)) # + colab={} colab_type="code" id="WGKP0Wd8iTy7" # Adapted from https://github.com/tensorflow/models/tree/master/official/transformer # Define the transformer encoder/decoder model def create_model(is_predicting, train, input_ids, 
input_mask, segment_ids, labels, r_id): """Creates transformer model.""" bert_module = hub.Module( BERT_MODEL_HUB, trainable=True) bert_inputs = dict( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) bert_outputs = bert_module( inputs=bert_inputs, signature="tokens", as_dict=True) # Use "pooled_output" for classification tasks on an entire sentence. # Use "sequence_outputs" for token-level output. output_layer = bert_outputs["sequence_output"] initializer = tf.variance_scaling_initializer(params["initializer_gain"], mode="fan_avg", distribution="uniform") with tf.variable_scope("Decoder_transformer_loss", initializer=initializer): decoder_embeddings = model.DecoderEmbeddings() decoder_stack = model.DecoderStack(params, train) attention_bias = model.get_padding_bias(input_mask) if is_predicting: predictions = predict(output_layer, attention_bias, decoder_embeddings, decoder_stack, params) return predictions else: logits = model.decode(labels, output_layer, attention_bias, decoder_embeddings, decoder_stack, train, params) xentropy, weights = model.padded_cross_entropy_loss(logits, labels, params["label_smoothing"], 6) loss = tf.reduce_sum(xentropy) / tf.reduce_sum(weights) predictions = tf.argmax(logits, axis=-1) return (loss, predictions, xentropy) # + colab={} colab_type="code" id="GKZM6QV-UP6u" # Adapted from https://github.com/tensorflow/models/tree/master/official/transformer # Prediction time function (implements beam search decoding strategy) def predict(encoder_outputs, encoder_decoder_attention_bias, decoder_embeddings, decoder_stack, params): """Return predicted sequence.""" batch_size = tf.shape(encoder_outputs)[0] input_length = tf.shape(encoder_outputs)[1] max_decode_length = input_length symbols_to_logits_fn = model._get_symbols_to_logits_fn(max_decode_length, decoder_embeddings, decoder_stack, params) # Create initial set of IDs that will be passed into symbols_to_logits_fn. 
initial_ids = tf.zeros([batch_size], dtype=tf.int32) # Create cache storing decoder attention values for each layer. cache = { "layer_%d" % layer: { "k": tf.zeros([batch_size, 0, params["hidden_size"]]), "v": tf.zeros([batch_size, 0, params["hidden_size"]]), } for layer in range(params["num_hidden_layers"])} # Add encoder output and attention bias to the cache. cache["encoder_outputs"] = encoder_outputs cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias # Use beam search to find the top beam_size sequences and scores. decoded_ids, scores = beam_search.sequence_beam_search( symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=initial_ids, initial_cache=cache, vocab_size=6, beam_size=params["beam_size"], alpha=params["alpha"], max_decode_length=max_decode_length, eos_id=6, decoder_embeddings=decoder_embeddings, decoder_stack=decoder_stack) # setting vocab_size to 6 for labels number. EOS is 4 # Get the top sequence for each batch element top_decoded_ids = decoded_ids[:, 0, 1:] top_scores = scores[:, 0] return {"outputs": top_decoded_ids, "scores": top_scores} # + colab={} colab_type="code" id="x8hmQXmzifgh" # Adapted from https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb # model_fn_builder actually creates the model function # using the passed parameters for num_labels, learning_rate, etc. 
def model_fn_builder(learning_rate, num_train_steps, num_warmup_steps):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, mode, params):
        """The `model_fn` for TPUEstimator."""
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        labels = features["label_ids"]
        # r_id is only present in some input files; default to an empty list.
        try:
            r_id = features["r_id"]
        except KeyError:  # FIX: was a bare `except:`, which also hid real errors
            r_id = []

        is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)

        # TRAIN and EVAL
        if not is_predicting:
            train = True
            (loss, predicted_labels, log_probs) = create_model(
                is_predicting, train, input_ids, input_mask, segment_ids,
                labels, r_id)

            train_op = bert.optimization.create_optimizer(
                loss, learning_rate, num_train_steps, num_warmup_steps,
                use_tpu=False)

            if mode == tf.estimator.ModeKeys.TRAIN:
                return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                                  train_op=train_op)
            # BUGFIX: the original passed `eval_metric_ops=eval_metrics` where
            # `eval_metrics` was never defined, so EVAL mode raised NameError.
            # Report the loss only; add metric ops here if richer eval is needed.
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss)

        # PREDICT
        train = False
        preds = create_model(
            is_predicting, train, input_ids, input_mask, segment_ids,
            labels, r_id)
        predictions = {
            'true_labels': labels,
            'predicted_labels': preds["outputs"],
            'r_id': r_id,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Return the actual model function in the closure
    return model_fn

# + colab_type="code" id="R2T7oJG2jmfJ"
# Adapted from https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "r_id": tf.FixedLenFeature([1], tf.int64),
        "is_real_example": tf.FixedLenFeature([], tf.int64),
    }

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)
        # tf.Example only supports tf.int64, but the TPU only supports
        # tf.int32, so cast all int64 features down to int32.
        for name in list(example.keys()):
            t = example[name]
            if t.dtype == tf.int64:
                t = tf.to_int32(t)
            example[name] = t
        return example

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        # For training, we want a lot of parallel reading and shuffling.
        # For eval, we want no shuffling and parallel reading doesn't matter.
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            d = d.repeat()
            d = d.shuffle(buffer_size=100)
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                drop_remainder=drop_remainder))
        return d

    return input_fn

# + colab_type="code" id="CHh82xQvjGf2"
# Adapted from https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb
# Specify output directory and number of checkpoint steps to save.
run_config = tf.estimator.RunConfig(
    model_dir=OUTPUT_DIR,
    save_summary_steps=SAVE_SUMMARY_STEPS,
    save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)

model_fn = model_fn_builder(
    learning_rate=LEARNING_RATE,
    num_train_steps=num_train_steps,
    num_warmup_steps=num_warmup_steps)

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    config=run_config,
    params={"batch_size": BATCH_SIZE})

# + colab_type="code" id="Oepu5BmZjghH"
# Adapted from https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb
# Create an input function for training. drop_remainder = True for using TPUs.
train_input_fn = file_based_input_fn_builder(
    input_file=TRAIN_FILE,  # BUGFIX: was `LEG_TRAIN_FILE`, which is never defined (NameError); the constant defined above is TRAIN_FILE
    seq_length=MAX_SEQ_LENGTH,
    is_training=True,
    drop_remainder=False)

# + colab_type="code" id="e6V9b9C7jqhe"
# Run training
print(f'Beginning Training!')
current_time = datetime.now()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)

# + colab_type="code" id="lh4OZLhQjtRt"
# Create an input function for testing. drop_remainder = True for using TPUs.
pred_input_fn = file_based_input_fn_builder(
    input_file=TEST_FILE,
    seq_length=MAX_SEQ_LENGTH,
    is_training=False,
    drop_remainder=False)
predictions = estimator.predict(pred_input_fn)

# + colab_type="code" id="KPRKD4Zljys8"
# Run predictions and save them in true_labels and predicted_labels
true_labels = []
predicted_labels = []
r_ids = []
for prediction in predictions:
    true_labels.append(prediction['true_labels'])
    predicted_labels.append(prediction['predicted_labels'])
    r_ids.append(prediction['r_id'])

# + colab_type="code" id="wmvt1lnej1pd"
# Run the evaluation.
def remove_padding(true_labels, predicted_labels):
    """Truncate each label/prediction pair at the EOS position to drop padding.

    NOTE(review): `argmax` assumes the EOS id is the largest label value in
    each sequence - confirm against the label encoding used when the input
    files were created.
    """
    non_pad_labels = []
    non_pad_preds = []
    for i in range(len(true_labels)):
        eos = true_labels[i].argmax(axis=0)
        # Drop position 0 (start id) and everything from EOS onwards.
        non_pad_labels.append(true_labels[i][1:eos])
        non_pad_preds.append(predicted_labels[i][1:eos])
    return non_pad_labels, non_pad_preds


non_pad_labels, non_pad_preds = remove_padding(true_labels, predicted_labels)

# Raw token-level accuracy.
correct = 0
total = 0
for i in range(len(non_pad_labels)):
    for j in range(len(non_pad_labels[i])):
        total += 1
        if non_pad_labels[i][j] == non_pad_preds[i][j]:
            correct += 1
print(correct/total)

labels_concat = np.concatenate((non_pad_labels))
preds_concat = np.concatenate((non_pad_preds))
# Confusion matrix.
# NOTE(review): `confusion_matrix` is presumably sklearn.metrics.confusion_matrix
# imported in an earlier (unseen) cell - verify.
confusion_matrix(labels_concat, preds_concat)

# + colab_type="code" id="1DtHhZEYh_nz"
# Export results
import pickle

results = {}
r_ids_nums = []
for i in r_ids:
    r_ids_nums.append(i[0])
results['true_labels'] = non_pad_labels
results['predicted_labels'] = non_pad_preds
results['r_ids'] = r_ids_nums

results_file = ""  # Set results file
with open(results_file, 'wb') as f:
    # Pickle the 'data' dictionary using the highest protocol available.
    pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)

# + colab_type="code" id="7u-UGgG7X882"
# Download results
from google.colab import files
files.download(results_file)
models/BERT_models/model_run_script.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + deletable=false editable=false run_control={"frozen": true}
# (frozen cell) Scratch notes on the json API:
#   json.dumps: dict -> str     json.dump: dict -> file
#   json.loads: str -> dict     json.load: file -> dict
# Example: json.dump(result_dict, f, indent=4, separators=(',', ': '))

# + deletable=false editable=false run_control={"frozen": true}
# (frozen cell) pprint / json.dumps formatting experiments
# (sort_keys, indent, separators).

# +
# train.py
import pandas as pd
import torch
import time
import numpy as np
import warnings
from gensim.models.word2vec import Word2Vec
from model import BatchProgramCC
from sklearn.metrics import precision_recall_fscore_support
warnings.filterwarnings('ignore')


def get_device():
    """Pick the compute device; the GeForce GT 730 is too old for CUDA here."""
    if torch.cuda.is_available():
        if torch.cuda.get_device_name(0) == 'GeForce GT 730':
            device = 'cpu'
        else:
            device = 'cuda'
    else:
        device = 'cpu'
    return torch.device(device)


device = get_device()


def get_batch(dataset, idx, bs):
    """Slice rows [idx, idx+bs) into two code-block lists and a float label tensor."""
    tmp = dataset.iloc[idx: idx+bs]
    x1, x2, labels = [], [], []
    for _, item in tmp.iterrows():
        x1.append(item['code_x'])
        x2.append(item['code_y'])
        labels.append([item['label']])
    return x1, x2, torch.FloatTensor(labels)


def train(model, train_data, batch_size, device, epochs, epoch, optimizer):
    """Run one training epoch; returns (mean loss, running accuracy)."""
    total_acc = 0.0
    total_loss = 0.0
    total = 0.0
    i = 0
    total_trues = 0
    while i < len(train_data):
        train1_inputs, train2_inputs, train_labels = get_batch(train_data, i, batch_size)
        i += len(train_labels)
        train_labels = train_labels.to(device)

        model.zero_grad()
        model.batch_size = len(train_labels)
        model.hidden = model.init_hidden()
        output = model(train1_inputs, train2_inputs)

        loss = loss_function(output, train_labels)
        loss.backward()
        optimizer.step()

        # Accumulate loss/accuracy. An output (distance) below 0.5 is
        # treated as "clone" (label 1).
        total_loss += loss.item() * len(train_labels)
        predicted = np.where(output.data.cpu().numpy() < 0.5, 1, 0)
        total_trues += (predicted == train_labels.cpu().numpy()).sum()
        total_acc = total_trues / i
        print('[Epoch:%3d/%3d] [data: %d/%d] Training Loss: %.4f Training Acc: %.4f%%'
              % (epoch + 1, epochs, i, len(train_data), loss, total_acc*100))
    return total_loss/len(train_data), total_acc


def validation(model, validation_data, batch_size, device, epochs, epoch):
    """Evaluate on the validation split; returns (mean loss, element accuracy)."""
    trues = []
    total_loss = 0.0
    i = 0
    total_trues = 0  # FIX: was initialised twice in the original
    with torch.no_grad():
        while i < len(validation_data):
            validation1_inputs, validation2_inputs, validation_labels = \
                get_batch(validation_data, i, batch_size)
            i += len(validation_labels)
            labels_dev = validation_labels.to(device)

            model.batch_size = len(validation_labels)
            model.hidden = model.init_hidden()
            output = model(validation1_inputs, validation2_inputs)

            # BUGFIX: the loss was computed against the CPU-resident
            # `validation_labels`, which fails with a device mismatch when the
            # model runs on CUDA; use the tensor that was moved to `device`.
            loss = loss_function(output, labels_dev)

            # calc validation acc
            predicted = (output.data < 0.5).cpu().numpy()
            trues_ex = labels_dev.cpu().numpy()
            trues.extend(trues_ex)
            total_trues += np.where(predicted == trues_ex, 1, 0).sum()
            total_loss += loss.item() * len(validation_labels)

    total_loss /= len(validation_data)
    total_acc = total_trues / len(validation_data)
    print('[Epoch:%3d/%3d] Validation Loss: %.4f Validation Acc: %.4f%%'
          % (epoch + 1, epochs, total_loss, total_acc*100))
    return total_loss, total_acc


def test(model, test_data, batch_size, device):
    """Evaluate on the test split; returns (loss sum, accuracy, P, R, F1).

    For Java, the notebook-global clone type `t` selects a BigCloneBench
    type weight and the weighted P/R/F1 are accumulated into the globals.
    """
    global precision, recall, f1
    predicts = []
    trues = []
    total_loss = 0.0
    i = 0
    total_trues = 0  # FIX: was initialised twice in the original
    with torch.no_grad():
        while i < len(test_data):
            test1_inputs, test2_inputs, test_labels = get_batch(test_data, i, batch_size)
            i += len(test_labels)
            test_labels = test_labels.to(device)

            model.batch_size = len(test_labels)
            model.hidden = model.init_hidden()
            output = model(test1_inputs, test2_inputs)
            loss = loss_function(output, test_labels)

            # calc testing acc
            predicted = (output.data < 0.5).cpu().numpy()
            predicts.extend(predicted)
            trues_ex = test_labels.cpu().numpy()
            trues.extend(trues_ex)
            total_trues += np.where(predicted == trues_ex, 1, 0).sum()
            total_loss += loss.item() * len(test_labels)

    # NOTE(review): total_loss is the *sum* (not the mean) - divide by
    # len(test_data) if comparing directly with the validation loss.
    total_acc = total_trues / len(test_data)
    if lang == 'java':
        # BigCloneBench per-clone-type weights.
        weights = [0, 0.005, 0.001, 0.002, 0.010, 0.982]
        p, r, f, _ = precision_recall_fscore_support(trues, predicts, average='binary')
        precision += weights[t] * p
        recall += weights[t] * r
        f1 += weights[t] * f
        print("Type-" + str(t) + ": " + str(p) + " " + str(r) + " " + str(f))
    else:
        precision, recall, f1, _ = precision_recall_fscore_support(trues, predicts, average='binary')
    print("Testing results(P,R,F1):%.3f, %.3f, %.3f" % (precision, recall, f1))
    return total_loss, total_acc, precision, recall, f1

# + deletable=false editable=false run_control={"frozen": true}
# (frozen cell) Earlier data-loading variant using the per-dataset word2vec
# embedding at data/<lang>/train/embedding/node_w2v_128 (same as the live
# cell below).

# + deletable=false editable=false
run_control={"frozen": true} # root = 'data/' # lang = 'java' # categories = 1 # if lang == 'java': # categories = 5 # print("Train for ", str.upper(lang)) # train_data = pd.read_pickle(root+lang+'/train/blocks.pkl').sample(frac=1.0) # validation_data = pd.read_pickle(root+lang+'/dev/blocks.pkl').sample(frac=1.0) # test_data = pd.read_pickle(root+lang+'/test/blocks.pkl').sample(frac=1.0) # # from gensim.models.word2vec import Word2Vec # #model= Word2Vec(min_count=4, size=200, workers=6, max_final_vocab=1000000) # #model.load('./word2vec_Model/word2vec') # word2vec = Word2Vec.load('./word2vec_Model/word2vec').wv # # MAX_TOKENS = word2vec.vectors.shape[0] # EMBEDDING_DIM = word2vec.vectors.shape[1] # embeddings = np.zeros((MAX_TOKENS + 1, EMBEDDING_DIM), dtype="float32") # embeddings[:MAX_TOKENS] = word2vec.vectors # + root = 'data/' lang = 'java' categories = 1 if lang == 'java': categories = 5 print("Train for ", str.upper(lang)) train_data = pd.read_pickle(root+lang+'/train/blocks.pkl').sample(frac=1.0) validation_data = pd.read_pickle(root+lang+'/dev/blocks.pkl').sample(frac=1.0) test_data = pd.read_pickle(root+lang+'/test/blocks.pkl').sample(frac=1.0) word2vec = Word2Vec.load(root+lang+"/train/embedding/node_w2v_128").wv MAX_TOKENS = word2vec.vectors.shape[0] EMBEDDING_DIM = word2vec.vectors.shape[1] embeddings = np.zeros((MAX_TOKENS + 1, EMBEDDING_DIM), dtype="float32") embeddings[:MAX_TOKENS] = word2vec.vectors # - from torch import nn from torch.nn import functional as F class ContrastiveLoss(nn.Module): def __init__(self, margin=2.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, enclidean_distance, label): #enclidean_distance = F.pairwise_distance(output[0], output[1]) loss_contrastive = torch.mean(label*torch.pow(enclidean_distance, 2) + (1-label) * torch.pow(torch.clamp(self.margin - enclidean_distance, min=0.0), 2)) return loss_contrastive # + HIDDEN_DIM = 100 ENCODE_DIM = 128 LABELS = 1 EPOCHS = [2,2,5,5,10] BATCH_SIZE = 
128 model = BatchProgramCC(EMBEDDING_DIM, HIDDEN_DIM, MAX_TOKENS+1, ENCODE_DIM, LABELS, BATCH_SIZE, device, embeddings) model.to(device) parameters = model.parameters() optimizer = torch.optim.Adamax(parameters) #loss_function = torch.nn.BCELoss() loss_function = ContrastiveLoss(2.0) best_model_state_dict = model.state_dict() train_loss = [] val_loss = [] train_acc = [] val_acc = [] test_loss = [] test_acc = [] test_p = [] test_r = [] test_f = [] best_acc = 0.0 precision, recall, f1 = 0, 0, 0 print('Start training...') for t in range(1, categories+1): if lang == 'java': train_data_t = train_data[train_data['label'].isin([t, 0])] train_data_t.loc[train_data_t['label'] > 0, 'label'] = 1 validation_data_t = validation_data[validation_data['label'].isin([t, 0])] validation_data_t.loc[validation_data_t['label'] > 0, 'label'] = 1 test_data_t = test_data[test_data['label'].isin([t, 0])] test_data_t.loc[test_data_t['label'] > 0, 'label'] = 1 else: train_data_t, validation_data_t, test_data_t = train_data, validation_data, test_data for epoch in range(0, EPOCHS[t-1]): start_time = time.time() model.train() total_loss, total_acc = train(model, train_data_t, BATCH_SIZE, device, EPOCHS[t-1], epoch, optimizer) train_loss.append(total_loss) train_acc.append(total_acc) print("Testing-%d..."%t) model.eval() total_loss, total_acc = validation(model, validation_data_t, BATCH_SIZE, device, EPOCHS[t-1], epoch) val_loss.append(total_loss) val_acc.append(total_acc) if total_acc > best_acc: best_acc = total_acc best_model_state_dict = model.state_dict() end_time = time.time() print("Time used: {}s".format(end_time-start_time)) torch.save(model.state_dict(), 'code_clone_detection_java_model_train_with_ContrastiveLoss_on_java_label_' + str(t) + '_epoch_' + str(EPOCHS[t-1]) + '.pt') total_loss, total_acc, precision, recall, f1 = test(model, test_data_t, BATCH_SIZE, device) test_loss.append(total_loss) test_acc.append(total_acc) test_p.append(precision) test_r.append(recall) 
test_f.append(f1) #torch.save(model.state_dict(), 'code_clone_detection_java_model_train_on_30_percent.pt') #model.load_state_dict(best_model_state_dict) #model.eval() #test(model, test_data, BATCH_SIZE*2, device) #print("Total testing results(P,R,F1):%.3f, %.3f, %.3f" % (precision, recall, f1)) #torch.save(model.state_dict(), 'code_clone_detection_model.pt') # + import matplotlib.pyplot as plt #epochs = len(hist.history['loss'][9:]) plt.figure(figsize=(16, 9)) plt.plot(range(len(train_loss)), train_loss, label='loss') plt.plot(range(len(val_loss)), val_loss, label='val_loss') plt.plot(range(len(test_loss)), test_loss, label='test_loss') plt.legend() plt.xlabel('epochs') plt.ylabel('loss') plt.show() plt.figure(figsize=(16, 9)) plt.plot(range(len(train_acc)), train_acc, label='accuracy') plt.plot(range(len(val_acc)), val_acc, label='val_accuracy') plt.plot(range(len(test_acc)), test_acc, label='test_accuracy') plt.legend() plt.xlabel('epochs') plt.ylabel('accuracy') plt.show() plt.figure(figsize=(16, 9)) plt.plot(range(len(test_p)), test_p, label='precision') plt.plot(range(len(test_r)), test_r, label='recall') plt.plot(range(len(test_f)), test_f, label='f1') plt.legend() plt.xlabel('epochs') plt.ylabel('accuracy') plt.show() # + deletable=false editable=false run_control={"frozen": true} # import pandas as pd # import os # import sys # #import warnings # #warnings.filterwarnings('ignore') # class Pipeline: # def __init__(self, ratio, root, language): # self.ratio = ratio # self.root = root # self.language = language # self.sources = None # self.blocks = None # self.pairs = None # self.train_file_path = None # self.dev_file_path = None # self.test_file_path = None # self.size = None # # self.seen_ids = None # self.unseen_ids = None # # # parse source code # def parse_source(self, output_file, option): # path = self.root+self.language+'/'+output_file # if os.path.exists(path) and option == 'existing': # source = pd.read_pickle(path) # else: # if self.language == 
'c': # from pycparser import c_parser # parser = c_parser.CParser() # source = pd.read_pickle(self.root+self.language+'/programs.pkl') # source.columns = ['id', 'code', 'label'] # source['code'] = source['code'].apply(parser.parse) # source.to_pickle(path) # else: # import javalang # def parse_program(func): # tokens = javalang.tokenizer.tokenize(func) # parser = javalang.parser.Parser(tokens) # tree = parser.parse_member_declaration() # return tree # source = pd.read_csv(self.root+self.language+'/bcb_funcs_all.tsv', sep='\t', header=None, encoding='utf-8') # source.columns = ['id', 'code'] # source.loc[15167, 'code'] = source['code'][15167]+'\r */' # source['code'] = source['code'].apply(parse_program) # source.to_pickle(path) # self.sources = source # return source # # # create clone pairs # def read_pairs(self, filename): # pairs = pd.read_pickle(self.root+self.language+'/'+filename) # self.pairs = pairs # # # split data for training, developing and testing # def split_data(self): # data_path = self.root+self.language+'/' # data = self.pairs # data_num = len(data) # ratios = [int(r) for r in self.ratio.split(':')] # train_split = int(ratios[0]/sum(ratios)*data_num) # val_split = train_split + int(ratios[1]/sum(ratios)*data_num) # # data = data.sample(frac=1, random_state=666) # train = data.iloc[:train_split] # dev = data.iloc[train_split:val_split] # test = data.iloc[val_split:] # # 好像这里并没能保证 train 和 test 不相交 # # def check_or_create(path): # if not os.path.exists(path): # os.mkdir(path) # train_path = data_path+'train/' # check_or_create(train_path) # self.train_file_path = train_path+'train_.pkl' # train.to_pickle(self.train_file_path) # # dev_path = data_path+'dev/' # check_or_create(dev_path) # self.dev_file_path = dev_path+'dev_.pkl' # dev.to_pickle(self.dev_file_path) # # test_path = data_path+'test/' # check_or_create(test_path) # self.test_file_path = test_path+'test_.pkl' # test.to_pickle(self.test_file_path) # # train_ids = 
train['id1'].append(train['id2']).unique() # dev_ids = dev['id1'].append(dev['id2']).unique() # test_ids = test['id1'].append(test['id2']).unique() # # import numpy as np # self.seen_ids = np.unique(np.hstack([train_ids, dev_ids])) # self.unseen_ids = np.setdiff1d(self.sources['id'].unique(), self.seen_ids) # # # construct dictionary and train word embedding # def dictionary_and_embedding(self, input_file, size): # self.size = size # data_path = self.root+self.language+'/' # if not input_file: # input_file = self.train_file_path # pairs = pd.read_pickle(input_file) # train_ids = pairs['id1'].append(pairs['id2']).unique() # # temp = self.sources.set_index('id',drop=False) # trees = temp.loc[temp.index.intersection(train_ids)] # if not os.path.exists(data_path+'train/embedding'): # os.mkdir(data_path+'train/embedding') # if self.language == 'c': # sys.path.append('../') # from prepare_data import get_sequences as func # else: # from utils import get_sequence as func # # def trans_to_sequences(ast): # sequence = [] # func(ast, sequence) # return sequence # corpus = trees['code'].apply(trans_to_sequences) # str_corpus = [' '.join(c) for c in corpus] # trees['code'] = pd.Series(str_corpus) # # trees.to_csv(data_path+'train/programs_ns.tsv') # # from gensim.models.word2vec import Word2Vec # w2v = Word2Vec(corpus, size=size, workers=16, sg=1, max_final_vocab=3000) # w2v.save(data_path+'train/embedding/node_w2v_' + str(size)) # # # generate block sequences with index representations # def generate_block_seqs(self): # if self.language == 'c': # from prepare_data import get_blocks as func # else: # from utils import get_blocks_v1 as func # from gensim.models.word2vec import Word2Vec # # word2vec = Word2Vec.load(self.root+self.language+'/train/embedding/node_w2v_' + str(self.size)).wv # vocab = word2vec.vocab # max_token = word2vec.vectors.shape[0] # # def tree_to_index(node): # token = node.token # result = [vocab[token].index if token in vocab else max_token] # children = 
node.children # for child in children: # result.append(tree_to_index(child)) # return result # # def trans2seq(r): # blocks = [] # func(r, blocks) # tree = [] # for b in blocks: # btree = tree_to_index(b) # tree.append(btree) # return tree # trees = pd.DataFrame(self.sources, copy=True) # trees['code'] = trees['code'].apply(trans2seq) # if 'label' in trees.columns: # trees.drop('label', axis=1, inplace=True) # self.blocks = trees # # # merge pairs # def merge(self,data_path,part): # pairs = pd.read_pickle(data_path) # pairs['id1'] = pairs['id1'].astype(int) # pairs['id2'] = pairs['id2'].astype(int) # df = pd.merge(pairs, self.blocks, how='left', left_on='id1', right_on='id') # df = pd.merge(df, self.blocks, how='left', left_on='id2', right_on='id') # df.drop(['id_x', 'id_y'], axis=1,inplace=True) # df.dropna(inplace=True) # # df.to_pickle(self.root+self.language+'/'+part+'/blocks.pkl') # # # def generate_query_source(self): # self.sources['block'] = self.blocks['code'] # self.sources.drop(columns=['code'], axis=1, inplace=True) # self.sources = self.sources.set_index('id') # self.query_source = self.sources.loc[self.sources.index.intersection(self.seen_ids)] # self.unseen_source = self.sources.loc[self.sources.index.intersection(self.unseen_ids)] # self.query_source.to_pickle(self.root+self.language+'/query_source.pkl') # self.unseen_source.to_pickle(self.root+self.language+'/unseen_source.pkl') # # # run for processing data to train # def run(self): # print('parse source code...') # self.parse_source(output_file='ast.pkl',option='existing') # print('read id pairs...') # if self.language == 'c': # self.read_pairs('oj_clone_ids.pkl') # else: # self.read_pairs('bcb_pair_ids.pkl') # print('split data...') # self.split_data() # print('train word embedding...') # self.dictionary_and_embedding(None,128) # print('generate block sequences...') # self.generate_block_seqs() # print('merge pairs and blocks...') # self.merge(self.train_file_path, 'train') # 
self.merge(self.dev_file_path, 'dev') # self.merge(self.test_file_path, 'test') # print('generate query source...') # self.generate_query_source() # # # ppl = Pipeline('3:1:1', 'data/', 'java') # ppl.run()
clone/Code clone detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import logging, gensim, bz2

# Show gensim's progress output while loading the (large) models.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)

# ## LSI

# Load the pre-trained LSI model (400 topics, default one-pass algorithm).
lsi = gensim.models.lsimodel.LsiModel.load('../data/lsi.model')

# Most contributing words (both positively and negatively) for the
# first five topics.
lsi.print_topics(5)

# ## LDA

lda = gensim.models.ldamodel.LdaModel.load('../data/lda.model')

lda.print_topics(5)

# ## Word2Vec

model = gensim.models.Word2Vec.load("../data/enwiki-20170820-pages-articles-word2vec.model")

model.most_similar("queen")

model.most_similar("china")

model.similarity("china", "korea")

model.doesnt_match("china korea japan sandwich".split())

model.most_similar(positive=['woman', 'king'], negative=['man'], topn=5)

model.most_similar(positive=['hitler', 'stalin'], topn=5)

model.most_similar(positive=['germany', 'russia'], topn=5)

model.most_similar(positive=['tv', 'television'], negative=['screen'], topn=5)

model.most_similar(positive=['music', 'canvas'], negative=['instrument'], topn=5)
notebook/Loading.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = (10, 5)
plt.rcParams['figure.autolayout'] = True
plt.rcParams['lines.linewidth'] = 3
plt.rcParams['axes.grid'] = True
plt.style.use('fivethirtyeight')
# -

# Daily price history for four tickers since 2015.
yf_prices = yf.download(['AAPL', 'MSFT', 'GOOG', 'FB'], start='2015-01-01')

# # 1. Single Asset Simulation

prices = yf_prices['Adj Close']['AAPL']
# Daily log-returns; the first NaN from diff() is zero-filled.
rs = prices.apply(np.log).diff(1).fillna(0)
rs

# Moving-average cross-over signal: fast (5d) minus slow (22d) mean.
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()

# +
# Long when the fast MA is above the slow MA, short otherwise.
pos = ma_x.apply(np.sign)

fig, ax = plt.subplots(2, 1)
ma_x.plot(ax=ax[0], title='Moving Average Cross-Over')
pos.plot(ax=ax[1], title='Position')
plt.savefig('tmp.png')
# -

# Shift positions by one day so each day's return uses yesterday's signal.
my_rs = pos.shift(1)*rs

my_rs.cumsum().apply(np.exp).plot(title='Strategy Performance')
# rs.cumsum().apply(np.exp).plot()
# plt.legend(['my performance', 'benchmark performance'])
plt.savefig('tmp.png')

# # 2. Multi-Assets Simulation

prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1).fillna(0)

w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()

# +
pos = ma_x.apply(np.sign)
# Normalise so gross exposure per day sums to 1 across assets.
pos /= pos.abs().sum(1).values.reshape(-1, 1)

fig, ax = plt.subplots(2, 1)
ma_x.plot(ax=ax[0], title='Moving Average Cross-Overs')
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
pos.plot(ax=ax[1], title='Positions')
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
plt.savefig('tmp.png')

# +
# Per-asset strategy returns.
my_rs = (pos.shift(1)*rs)
my_rs.cumsum().apply(np.exp).plot(title='Strategy Performance')
# rs.mean(1).cumsum().apply(np.exp).plot()
# plt.legend(['my performance', 'benchmark performance'])
plt.savefig('tmp.png')

# +
# Portfolio-level strategy returns (summed across assets).
my_rs = (pos.shift(1)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot(title='Strategy Performance')
# rs.mean(1).cumsum().apply(np.exp).plot()
# plt.legend(['my performance', 'benchmark performance'])
plt.savefig('tmp.png')
# -

# **Look-ahead bias**

# +
# Using today's signal for today's return (no shift) peeks into the future.
my_rs1 = (pos*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1)

my_rs1.cumsum().apply(np.exp).plot(title='Look-Ahead Bias Performance')
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])
# rs.mean(1).cumsum().apply(np.exp).plot()
# plt.legend(['my performance', 'benchmark performance'])
plt.savefig('tmp.png')
# -

# # 3. Evaluating Strategy Robustness

# +
# Re-run the simulation with execution delays of 1..10 days: a robust signal
# should decay gracefully rather than collapse at lag 2.
lags = range(1, 11)
lagged_rs = pd.Series(dtype=float, index=lags)
for lag in lags:
    my_rs = (pos.shift(lag)*rs).sum(1)
    my_rs.cumsum().apply(np.exp).plot()
    lagged_rs[lag] = my_rs.sum()
plt.title('Strategy Performance with Lags')
plt.legend(lags, bbox_to_anchor=(1.1, 0.95))
plt.savefig('tmp.png')
# -

# # 4. Simulating Transaction Costs

# Charge a fixed percentage on every unit of position turnover.
tc_pct = 0.01
delta_pos = pos.diff(1).abs().sum(1)
my_tcs = tc_pct*delta_pos

# +
my_rs1 = (pos.shift(1)*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1) - my_tcs

my_rs1.cumsum().apply(np.exp).plot()
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['without transaction costs', 'with transaction costs'])
plt.savefig('tmp.png')
# -
10-Financial_Machine_Learning/Vectorized Backtesting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Python, `SymPy`, and $\LaTeX$ # + # %matplotlib inline import sympy as sp import numpy as np import matplotlib.pyplot as plt # - # # Symbolic Mathematics (`SymPy`) sp.init_printing() # Turns on pretty printing np.sqrt(8) sp.sqrt(8) # ### You have to explicitly tell `SymPy` what symbols you want to use x, y, z = sp.symbols('x y z') my_equation = 2 * x + y my_equation my_equation + 3 my_equation - x my_equation / x # ### `SymPy` has all sorts of ways to manipulates symbolic equations sp.simplify(my_equation / x) another_equation = (x + 2) * (x - 3) another_equation sp.expand(another_equation) # + yet_another_equation = 2 * x**2 + 5 * x + 3 sp.factor(yet_another_equation) # - sp.solve(yet_another_equation,x) long_equation = 2*y*x**3 + 12*x**2 - x + 3 - 8*x**2 + 4*x + x**3 + 5 + 2*y*x**2 + x*y long_equation sp.collect(long_equation,x) sp.collect(long_equation,y) # ### `SymPy` can do your calculus homework. yet_another_equation sp.diff(yet_another_equation,x) sp.diff(yet_another_equation,x,2) sp.integrate(yet_another_equation,x) sp.integrate(yet_another_equation,(x,0,5)) # limits x = 0 to 5 # ### System of 3 equations example # $$ # \begin{array}{c} # x + 3y + 5z = 10 \\ # 2x + 5y + z = 8 \\ # 2x + 3y + 8z = 3 \\ # \end{array} # \hspace{3cm} # \left[ # \begin{array}{ccc} # 1 & 3 & 5 \\ # 2 & 5 & 1 \\ # 2 & 3 & 8 # \end{array} # \right] # \left[ # \begin{array}{c} # x\\ # y\\ # z # \end{array} # \right] # = # \left[ # \begin{array}{c} # 10\\ # 8\\ # 3 # \end{array} # \right] # $$ # + AA = sp.Matrix([[1,3,5],[2,5,1],[2,3,8]]) bb = sp.Matrix([[10],[8],[3]]) print(AA**-1) print(AA**-1 * bb) # - # ### `SymPy` can do *so* much more. It really is magic. 
Complete documentation can be found [here](http://docs.sympy.org/latest/index.html) # --- # # ## Python uses the $\LaTeX$ language to typeset equations. # + active="" # Most LaTeX commands are prefixed with a "\". For example \pi is the # command to produce the lower case Greek letter pi. # # The characters # $ % & ~ _ ^ \ { } are special characters in LaTeX. If # you want to typeset them you need to put a \ in front of them. For # example \$ will typeset the symbol $ # # % - comment. Everything is ignored after a % # $ - Math mode. Start and Stop math mode. $\pi$ # ^ - Superscript in Math mode. $2^2$ # _ - Subscript in Math mode. $2_2$ # - # ### Use a single set of `$` to make your $\LaTeX$ inline and a double set `$$` to center # + active="" # $$ \int \cos(x)\ dx = \sin(x) $$ # - # ### This code will produce the output: # ### $$ \int \cos(x)\ dx = \sin(x) $$ # ## Use can use $\LaTeX$ in plots: # + plt.style.use('ggplot') x = np.linspace(0,2*np.pi,100) y = np.sin(5*x) * np.exp(-x) plt.plot(x,y) plt.title("The function $y\ =\ \sin(5x)\ e^{-x}$") plt.xlabel("This is in units of 2$\pi$") plt.text(2.0, 0.4, '$\Delta t = \gamma\, \Delta t$', color='green', fontsize=36) # - # ## Use can use `SymPy` to make $\LaTeX$ equations for you! 
# + a = 1/( ( z + 2 ) * ( z + 1 ) ) print(sp.latex(a)) # - # $$ \frac{1}{\left(z + 1\right) \left(z + 2\right)} $$ print(sp.latex(sp.Integral(z**2,z))) # $$ \int z^{2}\, dz $$ # ## `Astropy` can output $\LaTeX$ tables from astropy.io import ascii from astropy.table import QTable my_table = QTable.read('Zodiac.csv', format='ascii.csv') my_table[0:3] ascii.write(my_table, format='latex') # --- # # ## Some websites to open up for class: # - ## [Special Relativity](https://en.wikipedia.org/wiki/Special_relativity) # # --- # # - ## [ShareLatex](https://www.sharelatex.com/) # # - ## [ShareLatex Docs and Help](https://www.sharelatex.com/learn) # # - ## [Latex Symbols](https://en.wikipedia.org/wiki/Wikipedia:LaTeX_symbols) # # - ## [Latex draw symbols](http://detexify.kirelabs.org/classify.html) # # - ## [The SAO/NASA Astrophysics Data System](https://ui.adsabs.harvard.edu/#classic-form) # # --- # # - ## [Latex wikibook](https://en.wikibooks.org/wiki/LaTeX) # --- # # # Assignment for Week 9 # + active="" # ----------------------------------------------------------------------------- # LaTeX homework - Create a LaTeX document with references. # ----------------------------------------------------------------------------- # # Start with the file: Example.tex # # Minimum required elements: # # * Between 2 and 4 pages in length (pages > 4 will be ignored). # # * At least two paragraphs of text (the text should be coherent). # # * At least 5 references from ADS # # * http://adsabs.harvard.edu/abstract_service.html # * Make sure to \cite{} the references in your paper # # * The equation on the back of the handout # # * One (or more) equation(s) of your choice. # # * One Compulsory plot generated with python (png format) # # * Start with t = np.linspace(0,12*np.pi,2000) # * generate a butterfly plot: http://en.wikipedia.org/wiki/Butterfly_curve_(transcendental) # * I have started it for you im the last cell of this notebook # # * One other plot/image (do not reuse and old one!) 
# # * One table of at least 4 columns and 4 rows. # # * Bonus points given for interesting content! # # ----------------------------------------------------------------------------- # # Create a PDF file: # # Save the file as FirstLast.pdf (i.e. TobySmith.pdf) # # Upload the PDF to the class canvas site # # ----------------------------------------------------------------------------- # Deadline: Tuesday Mar 07 - 5pm (-5 pts for each 30 minutes late) # ----------------------------------------------------------------------------- # + t = np.linspace(0,12*np.pi,2000) fig,ax = plt.subplots(1,1) # One window fig.set_size_inches(11,8.5) # (width,height) - letter paper landscape fig.tight_layout() # Make better use of space on plot
09_Python_LaTeX.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import tensorflow as tf import pickle import glob import pandas as pd import os import sys sys.path.append('../methods') import likelihood as lh import ddm from tqdm.notebook import tqdm # - # ## Training # # We train one model per week, from week 11 to week 52, using the previous ten weeks as training. We train one model per band. We train on non-anomalous image, so first we remove all known anomalies from the training set. # # `time_periods.p` should can be generated with the `ddm_setup.ipynb` notebook. with open('../local/time_periods.p', 'rb') as f: periods = pickle.load(f) # + truth_df = pd.read_csv('../local/truth.csv') positive_locs = truth_df[truth_df.label == True].location_name.to_numpy() for loc in positive_locs: periods.pop(loc, None) # - def get_band_images(band, inds, week, basis_length): """Get training and test images from periods dictionary, ending at given week, in given band, and extending basis_length weeks backwards. Args: band (float): either 'r', 'g', or 'b' inds (ndarray): indices of images in dictionary week (int): week of last (test) image basis_length (int): number of previous weeks to get. Returns: basis (tf.tensor): basis_length number of images for each index prior to week. shape = (len(inds), basis_length, dim_x, dim_y). test (tf.tensor): image at given week for each index. 
shape = (len(inds), 1, dim_x, dim_y, 1) """ imgs = np.array(list(periods.keys()))[inds] basis = [] test = [] for im in imgs: basis.append( [periods[im][f'week_{w}'][band] for w in range(week-basis_length,week)] ) test.append(periods[im][f"week_{week}"][band]) basis = tf.convert_to_tensor(np.array(basis), dtype=float) basis = tf.reshape( basis, [basis.shape[0], basis.shape[1], basis.shape[2], basis.shape[3], 1] ) test = tf.convert_to_tensor(np.array(test), dtype=float) test = tf.reshape( test, [test.shape[0], 1, test.shape[1], test.shape[2], 1] ) return basis, test # + # Train basis_length = 10 weeks = range(basis_length + 1, 53) n_train_samples = 500 # Number of images to use for training n_locations = len(periods) for w in tqdm(weeks): # skip if model already exists if os.path.exists(f'../local/models/model_week_{w}'): continue # Model for this week w_model = {} # Grab random sample of (negative) images inds = np.random.choice(n_locations, n_train_samples, replace=False) for band in ['r', 'g', 'b']: X, y = get_band_images(band, inds, w, basis_length) w_model[band] = ddm.fit_observation( X, y, num_steps=2000, learning_rate=0.001, reg=0.01, normalization='none' ) with open(f'../local/models/model_week_{w}', 'wb') as f: pickle.dump(w_model, f) # - # ## Predict with open('../local/time_periods.p', 'rb') as f: periods = pickle.load(f) # + # Load all models models = {} model_dir = '../local/models' for m in os.listdir(model_dir): week = m.split('_')[-1] with open(os.path.join(model_dir, m), 'rb') as f: models[int(week)] = pickle.load(f) # - def get_loc_images(band, loc, week, basis_length): """Get images from specific key in periods""" basis = [periods[loc][f'week_{w}'][band] for w in range(week-basis_length,week)] test = periods[loc][f"week_{week}"][band] basis = tf.convert_to_tensor(np.array(basis), dtype=float) basis = tf.reshape( basis, [1, basis.shape[0], basis.shape[1], basis.shape[2], 1] ) test = tf.convert_to_tensor(np.array(test), dtype=float) test = 
tf.reshape( test, [1, 1, test.shape[0], test.shape[1], 1] ) return basis, test # + results = {} for loc in tqdm(periods.keys()): results[loc] = {} for week in range(11, 53): model_w = models[week] results[loc][week] = {} for band in ['r', 'g', 'b']: model = model_w[band] X, y = get_loc_images(band, loc, week, basis_length) hot_score = ddm.hot_detect(model['gamma'], basis=X, test=y, rmse=model['rmse'], normalization='none', mean=model['mean'], std=model['std'], reduce=True) results[loc][week][band] = hot_score.numpy()[0] # - with open('../local/ddm_results.p', 'wb') as f: pickle.dump(results, f)
notebooks/ddm_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Pranali-Titvekar-13/LetUpgalrade-Python/blob/master/Assignment_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="pgX9G5K0e-JF" colab_type="code" colab={} #List Methods # + id="p0iGFvPufVmu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ea258bd1-38e3-4d33-e3b5-9b70824720f1" lst =["PC", 5, 72.16] lst.append("BE") #adds item to the end of the list lst # + id="WRJ-_RxhgmHv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fc1f00ae-a90d-4e0c-c911-74520b6affe4" lst.reverse() #reverse the elements of the list in place lst # + id="bHXtDC_RhWXU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="191a56a2-8c17-479d-8e2b-8760b7f07fa6" lst.insert(0,5) # insert the element at given position in list print(lst) print(lst.count(5)) # returns no.of times element appear in the list print(lst.count(72.26)) print(lst.count("PC")) # + id="huqQPWqujtvs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b4ccfc1f-3943-4d9f-aded-a244ce8dfb21" lst.remove(5) lst # + id="_ttWp1elj2jo" colab_type="code" colab={} # Dictionaries # + id="O3XG6fFflwrJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6e09d055-7bdb-4523-9ff2-f73825913436" dit={"nm":"PC", "rn":5, "mks":65.17} dit # + id="i0gXFrgamacB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="286ea179-6fa2-404d-b177-18e2f955e37a" dit.items() #returns key:value pair # + id="NLc_PVKXnElT" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 51} outputId="8f13c60b-9579-453c-f4a4-90420c9a789b" print (" Keys in dictionary - ") dit.keys() #returns keys from dict # + id="tvhu1kXInRmf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="52989d24-f364-4fbe-c101-011d45060722" d1={"rn":6} d2={"strm":"BE"} dit.update(d1) #adds or updates element with given key dit.update(d2) dit # + id="UZRGltXHov8U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="632a3783-e680-4eb8-c22b-d8bfa5151750" dit.pop("strm") #pops elemen dit # + id="pzo7QbJNrub3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="13dd3c80-591f-467f-e95d-302aeec535ee" dit.get("nm") #returns value of given key # + id="qA6f1owzsN_r" colab_type="code" colab={} #Sets # + id="cSpPwrTNsWNx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="53a76f06-dcea-4360-c8b5-1dc5b6bc406b" St={"apple","banana","kivi"} St1={"orange","pineapple","kivi"} St.union(St1) #returns set of all elements from both sets by removing duplicate # + id="Gg6NtKqsxfxv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d867d0c6-998b-4d55-a432-71affa70c639" St1.intersection(St) #returns all elements common in both sets # + id="h_hkr_gvxyBY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9bb03508-f16b-4285-9485-1a03511a1439" St.difference(St1) # returns all elements from set St by removing duplicate from both # + id="vwQy1Fi_yx9a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e6a071d-d97f-4854-8ce3-835ed07f6d12" St.symmetric_difference(St1) #removes common elements and gives rest of the elements # + id="2khMO4ZLzeAJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9822208f-bb77-4d79-f295-63767101b49b" St.isdisjoint(St1) # if no common element then 
gives true value # + id="r6QQ9TD24W83" colab_type="code" colab={} #Tuple # + id="__r2L_dO4ZPT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69385895-7e5b-4634-d54a-7997b92e8a45" Tup=("PC",6,72.16,6) Tup # + id="8spp7iWL48g5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8f1c96a7-8aae-45d1-f8d7-4cb2a1bafc13" Tup.count(6) #returns the number of times the element occus in Tuple # + id="resRU5D65MbW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="525093e9-4341-4cb4-a0a5-d15326ef641f" Tup.index(6) # returns the position of the element in Tuple # + id="G14Clob46AK6" colab_type="code" colab={} #Strings # + id="OHV7XNi76CO2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a8d96818-2abe-4d65-cf1d-ef768c198dee" str="india" str.capitalize() #returns the string with first letter in capital # + id="ZR0dOtpO6ggh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="6e41ea55-2b21-45c4-847b-3144d6ac27ad" str="MAHARASHTRA" str.casefold() #returns caseless copy of a string # + id="HHEBp4xa9YNm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="84f247b5-7da9-41b3-99a0-df6d0cd16c2d" Str="Pranali" print (Str) Str.endswith("i") #returns true if string ends with soecified suffix # + id="FW26ULDO-b45" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d0576eaf-6732-451a-c968-a31b568fc522" Str.find("a") #returns the lowest index in string where the substring found # + id="Hsl0Wjia_Vdf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7443ddef-e657-46fe-cbf2-cc17f91c0d48" Str.isalpha() #returns true if all the characters are alphabetic # + [markdown] id="1SO9BhNkj3ki" colab_type="text" #
Assignment_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Building a Trie in Python
#
# Before we start let us reiterate the key components of a Trie or Prefix Tree. A trie is a tree-like data structure that stores a dynamic set of strings. Tries are commonly used to facilitate operations like predictive text or autocomplete features on mobile phones or web search.
#
# Before we move into the autocomplete function we need to create a working trie for storing strings. We will create two classes:
# * A `Trie` class that contains the root node (empty string)
# * A `TrieNode` class that exposes the general functionality of the Trie, like inserting a word or finding the node which represents a prefix.
#
# Give it a try by implementing the `TrieNode` and `Trie` classes below!

# +
## Represents a single node in the Trie
class TrieNode:
    def __init__(self):
        # True once some inserted word terminates exactly at this node.
        self.is_word = False
        # Maps each outgoing character to the child TrieNode it leads to.
        self.children = {}

    def __iter__(self):
        """Yield the characters that label this node's children."""
        yield from self.children

    def insert(self, char):
        """Attach a fresh, empty child node reachable via `char`."""
        self.children[char] = TrieNode()

    def __repr__(self):
        return str(list(self.children))


## The Trie itself containing the root node and insert/find functions
class Trie:
    def __init__(self):
        # The root node represents the empty prefix.
        self.root = TrieNode()

    def insert(self, word):
        """ Add `word` to trie """
        node = self.root
        for char in word:
            if char not in node:
                node.insert(char)
            node = node.children[char]
        node.is_word = True

    def exists(self, word):
        """ Check if word exists in trie """
        node = self.find(word)
        return node.is_word if node else False

    def find(self, prefix):
        """ Find the node with the prefix """
        node = self.root
        for char in prefix:
            if char not in node:
                return None
            node = node.children[char]
        return node

    def __repr__(self):
        return str(self.to_list(self.root, ""))

    def to_list(self, node, word):
        """Collect every complete word stored at or below `node`."""
        if node is None:
            return []
        collected = []
        for char in node:
            child = node.children[char]
            if child.is_word:
                collected.append(word + char)
            collected.extend(self.to_list(child, word + char))
        return collected
# -

# # Finding Suffixes
#
# Now that we have a functioning Trie, we need to add the ability to list suffixes to implement our autocomplete feature. To do that, we need to implement a new function on the `TrieNode` object that will return all complete word suffixes that exist below it in the trie. For example, if our Trie contains the words `["fun", "function", "factory"]` and we ask for suffixes from the `f` node, we would expect to receive `["un", "unction", "actory"]` back from `node.suffixes()`.
#
# Using the code you wrote for the `TrieNode` above, try to add the suffixes function below. (Hint: recurse down the trie, collecting suffixes as you go.)

class TrieNode:
    def __init__(self):
        # True once some inserted word terminates exactly at this node.
        self.is_word = False
        # Maps each outgoing character to the child TrieNode it leads to.
        self.children = {}

    def __iter__(self):
        """Yield the characters that label this node's children."""
        yield from self.children

    def insert(self, char):
        """Attach a fresh, empty child node reachable via `char`."""
        self.children[char] = TrieNode()

    def __repr__(self):
        return str(list(self.children))

    def suffixes(self):
        # Recursively collect the suffix of every complete word below this node.
        return self.__suffixes(self)

    def __suffixes(self, node, word=""):
        if node is None:
            return []
        found = []
        for char in node:
            child = node.children[char]
            if child.is_word:
                found.append(word + char)
            found.extend(self.__suffixes(child, word + char))
        return found


# # Testing it all out
#
# Run the following code to add some words to your trie and then use the interactive search box to see what your code returns.
# Build a demo trie and hook it up to an interactive (Jupyter) search box.
MyTrie = Trie()
# Sample vocabulary: three prefix families (ant-, f-, tri-) to exercise autocomplete.
wordList = [
    "ant", "anthology", "antagonist", "antonym",
    "fun", "function", "factory",
    "trie", "trigger", "trigonometry", "tripod"
]
for word in wordList:
    MyTrie.insert(word)

from ipywidgets import widgets
from IPython.display import display
from ipywidgets import interact

def f(prefix):
    """Print every completion of `prefix` found in MyTrie, one per line."""
    if prefix != '':
        prefixNode = MyTrie.find(prefix)
        if prefixNode:
            # One suffix per line; the widget re-invokes f on every keystroke.
            print('\n'.join(prefixNode.suffixes()))
        else:
            print(prefix + " not found")
    else:
        print('')
# Trailing semicolon suppresses the widget's return-value echo in the notebook.
interact(f,prefix='');
problem_5/problem_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import altair as alt import deneb as den from vega_datasets import data df = data.barley() den.set_style(font_family="Helvetica Neue") chart = den.lineplot( df, x="variety:O", y="yield:Q", column="site:O", color=alt.Color("year:N", legend=None), errorbars=False, width=140, height=140) chart # - den.save(chart, "lineplot.svg", extra_formats="png") den.display_img("lineplot.png") help(den.lineplot) help(den.set_style)
examples/lineplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # execute this in command line on all machines to be used as workers before initiating the hyperparamer search # # ! pip install -U trains-agent==0.15.0 # # ! trains-agent daemon --queue default # pip install with locked versions # ! pip install -U pandas==1.0.3 # ! pip install -U trains==0.15.0 # ! pip install -U hpbandster==0.7.4 # Needed only for Bayesian optimization Hyper-Band # + from trains.automation import UniformParameterRange, UniformIntegerParameterRange from trains.automation import RandomSearch, HyperParameterOptimizer from trains.automation.hpbandster import OptimizerBOHB # Needed only for Bayesian optimization Hyper-Band from trains import Task # - task = Task.init(project_name='Hyper-Parameter Search', task_name='Hyper-Parameter Optimization') ##################################################################### ### Don't forget to replace this default id with your own task id ### ##################################################################### TEMPLATE_TASK_ID = 'd8e928460f98437c998f3597768597f8' optimizer = HyperParameterOptimizer( base_task_id=TEMPLATE_TASK_ID, # This is the experiment we want to optimize # here we define the hyper-parameters to optimize hyper_parameters=[ UniformIntegerParameterRange('number_of_epochs', min_value=5, max_value=15, step_size=1), UniformIntegerParameterRange('batch_size', min_value=2, max_value=12, step_size=2), UniformParameterRange('dropout', min_value=0, max_value=0.5, step_size=0.05), UniformParameterRange('base_lr', min_value=0.0005, max_value=0.01, step_size=0.0005), ], # this is the objective metric we want to maximize/minimize objective_metric_title='accuracy', objective_metric_series='total', objective_metric_sign='max', # maximize or minimize the objective metric 
max_number_of_concurrent_tasks=3, # number of concurrent experiments # setting optimizer - trains supports GridSearch, RandomSearch or OptimizerBOHB optimizer_class=OptimizerBOHB, # can be replaced with OptimizerBOHB execution_queue='default', # queue to schedule the experiments for execution optimization_time_limit=30., # time limit for each experiment (optional, ignored by OptimizerBOHB) pool_period_min=1, # Check the experiments every x minutes # set the maximum number of experiments for the optimization. # OptimizerBOHB sets the total number of iteration as total_max_jobs * max_iteration_per_job total_max_jobs=12, # setting OptimizerBOHB configuration (ignored by other optimizers) min_iteration_per_job=15000, # minimum number of iterations per experiment, till early stopping max_iteration_per_job=150000, # maximum number of iterations per experiment ) optimizer.set_time_limit(in_minutes=120.0) # set the time limit for the optimization process optimizer.start() optimizer.wait() # wait until process is done optimizer.stop() # make sure background optimization stopped # optimization is completed, print the top performing experiments id k = 3 top_exp = optimizer.get_top_experiments(top_k=k) print('Top {} experiments are:'.format(k)) for n, t in enumerate(top_exp, 1): print('Rank {}: task id={} |result={}' .format(n, t.id, t.get_last_scalar_metrics()['accuracy']['total']['last']))
examples/frameworks/pytorch/notebooks/image/hyperparameter_search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Method of Simulated Moments (MSM) # + pycharm={"is_executing": false} import pandas as pd import respy as rp # - # This notebook contains a step by step tutorial to simulated method of moments estimation using respy. # Respy can construct a msm function using `get_msm_func`. The function requires the following arguments: # # * params (pandas.DataFrame) # * options (dict) # * calc_moments (callable, list, dict) # * replace_nans (callable, list, dict) # * empirical_moments (pandas.DataFrame, pandas.Series, list, dict) # * weighting_matrix (numpy.ndarray) # * n_simulation_periods (int, default None) # * return_scalar (bool, default True) # # # `get_msm_func` returns a function where all arguments except *params* are held fixed. The returned function can then easily be passed on to an optimizer for estimation. # ## Introductory Example # # The following section discusses all the arguments in detail using an example model. # ### Arguments # #### The *params* and *options* Arguments # # The first step to msm estimation is the simulation of data using a specified model. Respy simulates data using a vector of parameters *params*, which will be the variable of interest for estimation, and a set of *options* that help define the underlying model. # # Respy provides a number of example models. For this tutorial we will be using the parameterization from Keane and Wolpin (1994). # + pycharm={"is_executing": false} params, options, df_emp = rp.get_example_model("kw_94_one") # + pycharm={"is_executing": false} params # + pycharm={"is_executing": false} options # - # #### The *calc_moments* Argument # # The *calc_moments* argument is the function that will be used to calculate moments from the simulated data. 
# It can also be specified as a list or dictionary of multiple functions if different sets of moments should be calculated from different functions.
#
# In this case, we will calculate two sets of moments: choice frequencies and parameters that characterize the wage distribution. The moments are saved to a pandas.DataFrame with time periods as the index and the moments as columns.

# + pycharm={"is_executing": false}
def calc_moments(df):
    """Return per-period choice frequencies and wage mean/std as one DataFrame."""
    # Share of each choice within every period (rows: Period, columns: choices).
    choices = (
        df.groupby("Period").Choice.value_counts(normalize=True).unstack()
    )
    # Mean and standard deviation of observed wages per period.
    wages = df.groupby(["Period"])["Wage"].describe()[["mean", "std"]]
    return pd.concat([choices, wages], axis=1)
# -

# #### The *replace_nans* Argument
#
# Next we define *replace_nans*, a function (or list of functions) that defines how to handle missings in the data.

# + pycharm={"is_executing": false}
def fill_nans_zero(df):
    """Replace every missing moment with 0."""
    return df.fillna(0)
# -

# #### The *empirical_moments* Argument
#
# The empirical moments are the moments that are calculated from the observed data which the simulated moments should be matched to. The *empirical_moments* argument requires a pandas.DataFrame or pandas.Series as inputs. Alternatively, users can input lists or dictionaries containing DataFrames or Series as items. It is necessary that *calc_moments*, *replace_nans* and *empirical_moments* correspond to each other i.e. *calc_moments* should output moments that are of the same structure as *empirical_moments*.
#
# For this example we calculate the empirical moments the same way that we calculate the simulated moments, so we can be sure that this condition is fulfilled.

# + pycharm={"is_executing": false}
empirical_moments = calc_moments(df_emp)
empirical_moments = fill_nans_zero(empirical_moments)

# + pycharm={"is_executing": false}
empirical_moments.head()
# -

# #### The *weighting_matrix* Argument
#
# For the msm estimation, the user has to define a weighting matrix.
`get_diag_weighting_matrix` allows users to create a diagonal weighting matrix that will match the moment vectors used for estimation. The required inputs are *empirical_moments* that are also used in `get_msm_func` and a set of weights that are of the same form as *empirical_moments*. If no weights are specified, the function will return the identity matrix. # + pycharm={"is_executing": false} weighting_matrix = rp.get_diag_weighting_matrix(empirical_moments) # + pycharm={"is_executing": false} pd.DataFrame(weighting_matrix) # - # If the user prefers to compute a weighting matrix manually, the respy function `get_flat_moments` may be of use. This function returns the empirical moments as an indexed pandas.Series which is the form they will be passed on to the loss function as. flat_empirical_moments = rp.get_flat_moments(empirical_moments) flat_empirical_moments # #### The *n_simulation_periods* Argument # # The *n_simulation_periods* is part of the simulator that is constructed by respy in `get_msm_func`. It dictates the number of periods in the simulated dataset and is not to be confused with `options["n_periods"]` which controls the number of periods for which decision rules are computed. If the desired dataset needs to include only a subset of the total number of periods realized in the model, *n_simulation_periods* can be set to a value lower number of periods. # # This argument, if not needed, can be left out when specifying inputs. By default, the simulator will produce a dataset with the number of periods specified in `options["n_periods"]`. # #### The *return_scalar* Argument # # The *return_scalar* argument allows us to return the moment errors in vector form. `get_msm_func` will return the moment error vector if *return_scalar* is set to **False** and will return the value of the weighted square product of the moment errors if *return_scalar* is set to **True** which is also the default. # ### MSM Function # We can now compute the msm function. 
The function is constructed using `get_msm_func`. Adding all arguments to `get_msm_func` will return a function that holds all elements but the *params* argument fixed and can thus easily be passed on to an optimizer. The function will return a value of 0 if we use the true parameter vector as input. # + pycharm={"is_executing": false} msm = rp.get_msm_func( params=params, options=options, calc_moments=calc_moments, replace_nans=fill_nans_zero, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, return_scalar=True, ) msm(params) # - # Using a different parameter vector will result in a value different from 0. # + pycharm={"is_executing": false} params_sim = params.copy() params_sim.loc["delta", "value"] = 0.8 # + pycharm={"is_executing": false} msm(params_sim) # - # If we set *return_scalar* to **False**, the function will return the vector of moment errors instead. # + pycharm={"is_executing": false} msm_vector = rp.get_msm_func( params=params_sim, options=options, calc_moments=calc_moments, replace_nans=fill_nans_zero, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, return_scalar=False, ) moment_errors = msm_vector(params_sim) moment_errors # - # ## Inputs as Lists or Dictionaries # # In the example above we used single elements for all inputs i.e. we used one function to calculate moments, one function to replace missing moments and saved all sets of moments in a single pandas.DataFrame. This works well for the example at hand because the inputs are relatively simple, but other applications might require more flexibility. `get_msm_func` thus alternatively accepts lists and dictionaries as inputs. This way, different sets of moments can be stored separately. Using lists or dictionaries also allows the use of different replacement functions for different moments. # # For the sake of this example, we add another set of moments to the estimation. 
In addition to the choice frequencies and wage distribution, we include the final education of agents. Here, the index is given by the educational experience agents have accumulated in period 39. The moments are given by the frequency of each level of experience in the dataset. Since this set of moments is not grouped by period, it cannot be saved to a DataFrame with the other moments. We hence give each set of moments its own function and save them to a list. The choice frequencies and wage distribution are saved to a pandas.DataFrame with multiple columns, the final education is given by a pandas.Series. # # Instead of lists, the functions and moments may also be saved to a dictionary. **Dictionaries will be sorted according to keys** before being passed on the loss function. Using dictionaries therefore has the advantage that the user does not have to pay attention to storing items in the correct order as with lists, where inputs are matched according to position. For the same reason it is not recommended to mix lists and dictionaries as inputs. # + pycharm={"is_executing": false} def calc_choice_freq(df): return df.groupby("Period").Choice.value_counts(normalize=True).unstack() def calc_wage_distr(df): return df.groupby(["Period"])["Wage"].describe()[["mean", "std"]] def calc_final_edu(df): last_period = max(df.index.get_level_values(1)) return df.xs(last_period, level=1).Experience_Edu.value_counts( normalize=True, sort=False ) calc_moments = [calc_choice_freq, calc_wage_distr, calc_final_edu] # - # We can additionally specify different replacement functions for each set of moments and save them to a list just like *calc_moments*. However, here we will use the same replacement function for all moments and thus just need to specify one. Respy will automatically apply this function to all sets of moments. # # Note that this only works if only one replacement function is given. 
Otherwise *replace_nans* must be a list of the same length as *calc_moments* with each replacement function holding the same position as the moment function it corresponds to. In the case of dictionaries, replacement functions should be saved with the same keys as set of moments they correspond to. # + pycharm={"is_executing": false} def fill_nans_zero(df): return df.fillna(0) replace_nans = [fill_nans_zero] # - # We now calculate the *empirical_moments*. They are saved to a list as well. We can calculate the *weighting_matrix* as before. # + pycharm={"is_executing": false} params, options, df = rp.get_example_model("kw_94_one") empirical_moments = [ calc_choice_freq(df), calc_wage_distr(df), calc_final_edu(df), ] empirical_moments = [fill_nans_zero(df) for df in empirical_moments] # + pycharm={"is_executing": false} weighting_matrix = rp.get_diag_weighting_matrix(empirical_moments) # - # Finally, we can construct the msm function from the defined inputs. # + pycharm={"is_executing": false} msm = rp.get_msm_func( params=params, options=options, calc_moments=calc_moments, replace_nans=replace_nans, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, return_scalar=True, ) msm(params) # - # The result for the simulated moments slightly deviates from the introductory example because we added an additional set of moments. # + pycharm={"is_executing": false} msm(params_sim) # + pycharm={"is_executing": false} msm_vector = rp.get_msm_func( params=params, options=options, calc_moments=calc_moments, replace_nans=replace_nans, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, return_scalar=False, ) moment_errors = msm_vector(params_sim) moment_errors # - # ## References # # > <NAME>. and <NAME>. (1994). [The Solution and Estimation of Discrete Choice Dynamic Programming Models by Simulation and Interpolation: Monte Carlo Evidence](https://doi.org/10.2307/2109768). *The Review of Economics and Statistics*, 76(4): 648-672. 
# # > <NAME>. (1989). [A Method of Simulated Moments for Estimation of Discrete Response Models without Numerical Integration](https://jstor.org/stable/1913621). *Econometrica: Journal of the Econometric Society*, 995-1026. #
docs/getting_started/tutorial-msm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lists l = [1,2,3] type(l) l = [1,2,'String', 123, 2.0] l print(l) l[0] l[2] type(l[2]) l l[1:3] l[:] # print all the list l[:4] l[-1] l[::-1] l l = l + [3] l 3+3 'this is a string' + 'whoohoo' 'whoohoo' + 3 l = [1,2, 'a', 'b'] l l*3 # ## list methods l l.append('New Item') l a = l.pop() l a popped = l.pop(2) l popped l l = [2,5,3,8,56,0] l.reverse() l l.sort() l # ## Nesting l = [1,2,3,'String', [3,4,5]] type(l[4]) l[4] l[4][1] matrix = [[1,2,3],[4,5,6],[7,8,9]] matrix matrix[1][1] matrix[1].append(7)
03 - lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/hltdev8642/Babylon.js/blob/master/glTF.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="api4JZ5nEKmc" colab_type="code" colab={} import numpy as np import base64 # + id="jYJNbOBbB4JE" colab_type="code" colab={} VERTICES = np.array([0.,0.,0., 0.,1.,0., 1.,0.,0.], dtype=np.float32) INDICES = np.array([0, 1, 2], dtype=np.ushort) HOWMANY = 3 MAX_X = 1 MAX_Y = 1 MAX_Z = 0 MIN_X = 0 MIN_Y = 0 MIN_Z = 0 MAX = 2 MIN = 0 HOWMANYBYTES_V = VERTICES.nbytes HOWMANYBYTES_I = INDICES.nbytes B64_VERTICES = base64.b64encode(VERTICES) B64_INDICES = base64.b64encode(INDICES) # + id="GoUiqH6hCIHF" colab_type="code" colab={} gltf = { "asset": { "version": "2.0", "generator": "CS460 Magic Fingers" }, "accessors": [ { "bufferView": 0, "byteOffset": 0, "componentType": 5126, "count": HOWMANY, "type": "VEC3", "max": [MAX_X, MAX_Y, MAX_Z], "min": [MIN_X, MIN_Y, MIN_Z] }, { "bufferView": 1, "byteOffset": 0, "componentType": 5123, "count": HOWMANY, "type": "SCALAR", "max": [MAX], "min": [MIN] } ], "bufferViews": [ { "buffer": 0, "byteOffset": 0, "byteLength": HOWMANYBYTES_V, "target": 34962 }, { "buffer": 1, "byteOffset": 0, "byteLength": HOWMANYBYTES_I, "target": 34963 } ], "buffers": [ { "uri": "data:application/octet-stream;base64,"+str(B64_VERTICES, 'utf-8'), "byteLength": HOWMANYBYTES_V }, { "uri": "data:application/octet-stream;base64,"+str(B64_INDICES, 'utf-8'), "byteLength": HOWMANYBYTES_I } ], "meshes": [ { "primitives": [{ "mode": 4, "attributes": { "POSITION": 0 }, "indices": 1 }] } ], "nodes": [ { "mesh": 0 } ], "scenes": [ { "nodes": [ 0 ] } ], "scene": 0 } # + id="eAlqXO7ACp9m" colab_type="code" 
outputId="95bec482-8ddb-40ec-b173-c1708dd209d4" colab={"base_uri": "https://localhost:8080/", "height": 54} str(gltf).replace("'", '"') # we need double quotes instead of single quotes # + id="PPXfK6iGGRiC" colab_type="code" colab={}
glTF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="zkhSn6YVSWox" # # Load Data # + colab={"base_uri": "https://localhost:8080/"} id="sU6UdU2NNwbE" executionInfo={"status": "ok", "timestamp": 1622347055273, "user_tz": 240, "elapsed": 234, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="7686644a-11b3-4272-dbba-5e813887ddac" # After executing the cell above, Drive # files will be present in "/content/drive/My Drive". # !ls "/content/drive/My Drive/Udacity-MLE-Capstone-Starbucks-data/" # + id="TkeeTFiGNg_h" import pandas as pd import numpy as np import math import json % matplotlib inline from matplotlib import pyplot as plt filePath = "/content/drive/My Drive/Udacity-MLE-Capstone-Starbucks-data/" # read in the json files portfolio = pd.read_json(filePath+'data/portfolio.json', orient='records', lines=True) profile = pd.read_json(filePath+'data/profile.json', orient='records', lines=True) transcript = pd.read_json(filePath+'data/transcript.json', orient='records', lines=True) # Upload preprocessed data transcriptTransformRec = pd.read_pickle(filePath+'preprocessedData/transcriptTransformRec_v1.pkl') profileClean = pd.read_pickle(filePath+'preprocessedData/profileClean_v1.pkl') portfolioClean = pd.read_pickle(filePath+'preprocessedData/portfolioClean_v1.pkl') transcriptCleanOld = pd.read_pickle(filePath+'preprocessedData/transcriptClean_v1.pkl') # + colab={"base_uri": "https://localhost:8080/", "height": 609} id="w5FcYaelNj5L" executionInfo={"status": "ok", "timestamp": 1622347058499, "user_tz": 240, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": 
"11055133938004454224"}} outputId="623fdfa6-e51b-4921-ca74-6cab4afc3406" transcriptTransformRec # + colab={"base_uri": "https://localhost:8080/"} id="RkhTyH-jiCKE" executionInfo={"status": "ok", "timestamp": 1622347058500, "user_tz": 240, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="e720ae5c-b9de-4335-beb0-3c5dc4c546dc" # Get indices of completed offers offerCompIdx = transcriptTransformRec[transcriptTransformRec.offer_completed == 1].index.tolist() # Statistics describing the reward amounts transcriptTransformRec[transcriptTransformRec.offer_completed == 1].compTransAmt.describe() # + colab={"base_uri": "https://localhost:8080/"} id="TuhTYxOhj07N" executionInfo={"status": "ok", "timestamp": 1622347058502, "user_tz": 240, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="5f46fa89-f1d7-4ec5-a46a-e482a9afbc26" # Statistics describing the reward amounts transcriptTransformRec[transcriptTransformRec.offer_completed == 1].adjRev.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="K6cGkdClnhH7" executionInfo={"status": "ok", "timestamp": 1622347060538, "user_tz": 240, "elapsed": 2047, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="b46dfa3d-f71f-4102-b1b7-e586afe7863f" fig = plt.figure() ax = transcriptTransformRec[transcriptTransformRec.offer_completed == 1].compTransAmt.plot.hist(bins=1000) plt.xlabel('Transaction Revenue ($)') plt.ylabel('Frequency') plt.title('Distribution of Completed Offer Transaction Revenue') # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="0QajmG_miIjW" 
executionInfo={"status": "ok", "timestamp": 1622347062905, "user_tz": 240, "elapsed": 2376, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="fbe7faf2-a04b-41b3-fd10-7c5fd3edc0e2" fig = plt.figure() ax = transcriptTransformRec[transcriptTransformRec.offer_completed == 1].compTransAmt.plot.hist(bins=1000) plt.xlabel('Transaction Revenue ($)') plt.ylabel('Frequency') plt.title('Distribution of Completed Offer Transaction Revenue') plt.xlim(0, 60) # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="ZxLzI8onoWI5" executionInfo={"status": "ok", "timestamp": 1622347064936, "user_tz": 240, "elapsed": 2037, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="f6c0c7a7-4ee5-4a50-f75a-d92198bed1c1" fig = plt.figure() ax = transcriptTransformRec[transcriptTransformRec.offer_completed == 1].adjRev.plot.hist(bins=1000) plt.xlabel('Completed Offer Revenue Minus Reward Amount ($)') plt.ylabel('Frequency') plt.title('Distribution of Transaction Amounts') # + [markdown] id="XGYkFbBoVtQE" # # Additional Preprocessing # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="Z8mrM8h7WRGc" executionInfo={"status": "ok", "timestamp": 1622347064938, "user_tz": 240, "elapsed": 58, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="2d60bc46-ed96-410b-c2dc-87aafb82da04" portfolio # + id="_FrjGJ-PVvKT" from sklearn import preprocessing def normalizePorfolio(df): # Initialize a min-max scaler object #scaler = MinMaxScaler() normalized_df=(df-df.min())/(df.max()-df.min()) return normalized_df # + id="ClC-Nu7OV1YF" normalizeColumns = ['difficulty', 'duration', 'reward'] 
normalizedPorfolio = normalizePorfolio(portfolioClean[normalizeColumns]) # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="Dw4VHrYbV9GI" executionInfo={"status": "ok", "timestamp": 1622347064941, "user_tz": 240, "elapsed": 56, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="6efb6abe-e5b9-4f78-f7bb-423ccd241317" normalizedPorfolio # + colab={"base_uri": "https://localhost:8080/", "height": 379} id="xOlqNoObV-K6" executionInfo={"status": "ok", "timestamp": 1622347064943, "user_tz": 240, "elapsed": 56, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="ea32f4b7-a5f0-46b6-fc94-4daa759d1259" # Create cleaned and normalized 'profile' dataset portfolioClean[normalizeColumns] = normalizedPorfolio[normalizeColumns] portfolioClean # + [markdown] id="jnuhzqa3SZi6" # # Merge Data # + id="NV29a9OhOZzp" def merge_data(portfolio,profile,transcript): """ Merge cleaned data frames for EDA Parameters ---------- portfolio : cleaned portfolio data frame profile : cleaned profile data frame transcript : cleaned transcript data frame Returns ------- merged_df: merged data frame """ #merged_df = pd.merge(transcript, profile, on='customer_id') merged_df = pd.merge(portfolio, transcript, on='offer_id') merged_df = pd.merge(merged_df, profile, on='customer_id') return merged_df # + id="qYaIxWKfSbwM" merged_df = merge_data(portfolioClean,profileClean,transcriptTransformRec) # + colab={"base_uri": "https://localhost:8080/", "height": 309} id="t8Jh1bkKSg2S" executionInfo={"status": "ok", "timestamp": 1622347064945, "user_tz": 240, "elapsed": 54, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} 
outputId="df3445b6-6f32-4253-d502-89fb6f70657b" merged_df.head(5) # + id="HXNcwl4RPfFv" mergedTrain_df = merged_df.loc[(merged_df['offer_completed'] == 1) & (merged_df['offerTrans']< 50)].copy() # + colab={"base_uri": "https://localhost:8080/", "height": 609} id="NJEAIahGRL3J" executionInfo={"status": "ok", "timestamp": 1622347064957, "user_tz": 240, "elapsed": 64, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="5b6aaf79-e8b3-4968-cd70-a24e2643052f" mergedTrain_df # + [markdown] id="9rrYjXSvXHSI" # ## Drop columns not needed for training # + id="K07wD8azN1xk" # Get target variable for training y = mergedTrain_df['adjRev'].copy() # + colab={"base_uri": "https://localhost:8080/"} id="6vxOdn9pSjec" executionInfo={"status": "ok", "timestamp": 1622347065012, "user_tz": 240, "elapsed": 117, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="bfd0d18c-f6f7-4844-ced1-f9de8b84cd60" print(mergedTrain_df.columns) # Rename 'if' to 'customer_id' mergedTrain_df.rename(columns={'reward_x': 'reward'}, inplace=True) # Drop columns not needed for training mergedTrain_df.drop(['offer_id', 'offerType', 'customer_id', 'event', 'amount', 'reward_y', 'time_days', 'joinDate', 'offer_completed', 'offer_viewed', 'offer_compViewed', 'offer_compNotViewed', 'compTransAmt', 'rewardReceived', 'adjRev', 'offerTrans'], axis=1, inplace=True) X = mergedTrain_df.copy() # + colab={"base_uri": "https://localhost:8080/", "height": 439} id="OnW-K5dDT-Fw" executionInfo={"status": "ok", "timestamp": 1622347065321, "user_tz": 240, "elapsed": 49, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} 
outputId="76a8c19e-2296-4f6c-871a-8cfb68ed940e" # View mergedTrain_df mergedTrain_df # + id="WCdcGiAXPZda" # + id="BSnlaGgAxHdb" # Save off data X.to_pickle(filePath+'dataOfferCompAdjRevX.pkl') y.to_pickle(filePath+'dataOfferCompAdjRevY.pkl') # + [markdown] id="icUFTasXlKLt" # ## Define target and feature data # + [markdown] id="A7TcW5KklFZc" # # Preparing and splitting the data # + id="9IddeVGqTV5N" from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler min_max_scaler = preprocessing.MinMaxScaler() X = min_max_scaler.fit_transform(X) # We split the dataset into 2/3 training and 1/3 testing sets. X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, shuffle=True) # Then we split the training set further into 2/3 training and 1/3 validation sets. X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.2, shuffle=True) # + colab={"base_uri": "https://localhost:8080/"} id="JsZWKZnCdYrm" executionInfo={"status": "ok", "timestamp": 1622347065335, "user_tz": 240, "elapsed": 59, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="a2683b31-4965-4c44-8ee9-47caffbb5546" X_train # + colab={"base_uri": "https://localhost:8080/"} id="NNi0huo-Tgzz" executionInfo={"status": "ok", "timestamp": 1622347065336, "user_tz": 240, "elapsed": 55, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="f17e5aa6-c786-44d4-9600-e0d23241c1df" Y_train # + [markdown] id="3DPLbqaqfICM" # # Train model # + [markdown] id="F8ofbAhXfLKc" # ## Train Model using a gradient boosting algorithm # + [markdown] id="Vtk_cIUAfcGB" # The objective of thise model is to use tranaction, customer, and ad characteristic data to predict whether an offer is completed or not. 
# + id="anrwkwxHUFuI" from sklearn.ensemble import GradientBoostingClassifier from xgboost import XGBClassifier from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, KFold from sklearn.metrics import mean_squared_error from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import roc_curve from sklearn.metrics import RocCurveDisplay from sklearn.metrics import r2_score from sklearn.metrics import max_error from sklearn.metrics import explained_variance_score # Skikit learn gradient boosting classifier #clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, # max_depth=1, random_state=0) # XGBoost Classifier clf = XGBRegressor() clf.learning_rate = 0.1 clf.n_estimators = 500 clf.objective = 'reg:squarederror' # + colab={"base_uri": "https://localhost:8080/"} id="8mbDPnf0elVh" executionInfo={"status": "ok", "timestamp": 1622347068287, "user_tz": 240, "elapsed": 2996, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="6c167aea-4a07-4402-b2c1-acc0f6e18c1e" clf.fit(X_train, Y_train) # + id="X5V87D8UVcjy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1622347068288, "user_tz": 240, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="dcfc59d2-0c04-4d66-acec-b49059500dac" clf.score(X_test, Y_test) # + colab={"base_uri": "https://localhost:8080/"} id="i4c5AUh2SdsB" executionInfo={"status": "ok", "timestamp": 1622347096316, "user_tz": 240, "elapsed": 28033, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": 
"11055133938004454224"}} outputId="f59958fc-faab-4e95-94c8-b647b048fa31" scores = cross_val_score(clf, X_train, Y_train,cv=10) print("Mean cross-validation score: %.2f" % scores.mean()) # + colab={"base_uri": "https://localhost:8080/"} id="2AH6b_iNS1Vg" executionInfo={"status": "ok", "timestamp": 1622347096317, "user_tz": 240, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="cb994b00-6ae4-4295-da8d-1c3d83400efe" # Calculate Mean Squared Error (MSE) ypred = clf.predict(X_test) mse = mean_squared_error(Y_test, ypred) print("MSE: %.2f" % mse) MSE: 3.35 print("RMSE: %.2f" % (mse**(1/2.0))) RMSE: 1.83 # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="S7Gfn1frTAN9" executionInfo={"status": "ok", "timestamp": 1622347096707, "user_tz": 240, "elapsed": 403, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="c1e5cdcc-3180-412b-fc64-7077e27fd8f4" fig1 = plt.figure() x_ax = range(len(Y_test)) plt.plot(x_ax, Y_test, label="original") plt.plot(x_ax, ypred, label="predicted") plt.title("Test and predicted data") plt.xlim(0, 100) plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="EKVXqcI8T7dN" executionInfo={"status": "ok", "timestamp": 1622347097013, "user_tz": 240, "elapsed": 316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="50270b5a-0d01-48eb-e5a8-b7071c1cd912" fig2 = plt.figure() x_ax = range(len(Y_test)) plt.plot(x_ax, Y_test, label="original") plt.plot(x_ax, ypred, label="predicted") plt.title("Test and predicted data") plt.legend() plt.show() # + [markdown] id="OZprIjZ-CKXl" # ## Model Validation # + 
id="q-laBM8iVICP" def modelReg_eval(model, X_train, Y_train, X_test, Y_test, X_val, Y_val): ''' Function to evaluate the performance of the regression model.''' print("Model Evaluation:\n") print(model) print('\n') print("Accuracy score (training): {0:.3f}".format(model.score(X_train, Y_train))) print("Accuracy score (test): {0:.3f}".format(model.score(X_test, Y_test))) print("Accuracy score (validation): {0:.3f}".format(model.score(X_val, Y_val))) print('\n') y_pred = model.predict(X_val) ypred = y_pred # Compute mean cross-validation score scores = cross_val_score(model, X_val, Y_val,cv=10) print("Mean cross-validation score: %.2f" % scores.mean()) print("R2 score: %.2f" % r2_score(Y_val,y_pred)) print("Max error: %.2f" % max_error(Y_val,y_pred)) print("Explained Variance Score: %.2f" % explained_variance_score(Y_val,y_pred)) print('\n') # Compute Mean Squared Error (MSE) mse = mean_squared_error(Y_val, ypred) print("MSE: %.2f" % mse) print("RMSE: %.2f" % (mse**(1/2.0))) print('\n') fig1 = plt.figure() x_ax = range(len(Y_val)) plt.plot(x_ax, Y_val, label="original") plt.plot(x_ax, ypred, label="predicted") plt.title("Test and predicted data") plt.xlim(0, 100) plt.legend() plt.show() print('\n') fig2 = plt.figure() x_ax = range(len(Y_val)) plt.plot(x_ax, Y_val, label="original") plt.plot(x_ax, ypred, label="predicted") plt.title("Test and predicted data") plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="41jxukFhVbOi" executionInfo={"status": "ok", "timestamp": 1622347105198, "user_tz": 240, "elapsed": 8198, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="cef286f6-0790-461e-9b12-238f8c752ee2" modelReg_eval(clf, X_train, Y_train, X_test, Y_test, X_val, Y_val) # + id="WbbsVnPaqWjS" # + [markdown] id="kZ6KrgTRb_xQ" # ## Plot Training Deviance # + colab={"base_uri": "https://localhost:8080/"} 
id="zhlgIIaRdTi7" executionInfo={"status": "ok", "timestamp": 1622347133848, "user_tz": 240, "elapsed": 292, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="2ecd0e93-43f1-4183-b7d4-e6a721e2075c" print(clf) # + id="S1E9VRDOB0co" # + [markdown] id="E2-OB0IXoixf" # ### Plot feature importance of the XGBoost Regressor Model # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="NoDIOGZqcHz_" executionInfo={"status": "ok", "timestamp": 1622347137145, "user_tz": 240, "elapsed": 345, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="693b4ca8-9169-48a9-8c8b-8ec395a9a262" from xgboost import plot_importance from matplotlib import pyplot # plot feature importance plot_importance(clf) pyplot.show() # + [markdown] id="gfpIR6YvlPHq" # # Evaluate Classification Performance of Offers Completed Using k-Nearest Neighbors (kNN) Algorithm # # + colab={"base_uri": "https://localhost:8080/"} id="RkIEYB5oeBmY" executionInfo={"status": "ok", "timestamp": 1622347227224, "user_tz": 240, "elapsed": 294, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="fbb97c52-b896-4344-95e8-03362530f6ef" from sklearn.neighbors import KNeighborsRegressor # Create KNN model kNN = KNeighborsRegressor(n_neighbors=30) # Train KNN model kNN.fit(X_train, Y_train) # + colab={"base_uri": "https://localhost:8080/", "height": 953} id="ox26avujnBs2" executionInfo={"status": "ok", "timestamp": 1622347230710, "user_tz": 240, "elapsed": 2324, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} 
outputId="b0efacc7-cd21-420b-d8fe-ee3b0a475f71" # Evaluate KNN model performance modelReg_eval(kNN, X_train, Y_train, X_test, Y_test, X_val, Y_val) # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="qiLYUscjnH6L" executionInfo={"status": "ok", "timestamp": 1622347217976, "user_tz": 240, "elapsed": 8323, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="a6073bc3-bcef-40df-9b13-17a65b51265e" # Follows example from https://www.tutorialspoint.com/scikit_learn/scikit_learn_kneighbors_classifier.htm # to evaluate best value of k from sklearn import metrics k_range = range(1,31) scores = {} scores_list = [] for k in k_range: classifier = KNeighborsRegressor(n_neighbors=k) classifier.fit(X_train, Y_train) y_pred = classifier.predict(X_test) scores[k] = r2_score(Y_test,y_pred) scores_list.append(r2_score(Y_test,y_pred)) #result = metrics.confusion_matrix(Y_test, y_pred) #print("Confusion Matrix:") #print(result) #result1 = metrics.classification_report(Y_test, y_pred) #print("Classification Report:",) #print (result1) # Plot data # %matplotlib inline import matplotlib.pyplot as plt plt.plot(k_range,scores_list) plt.xlabel("Value of K") plt.ylabel("Accuracy") # + [markdown] id="VCGBQbQEm6O4" # ## Train baseline model using a support vector machine (SVM) clusting algorithm # + id="FMRH4XjCvGKO" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1622347320623, "user_tz": 240, "elapsed": 6821, "user": {"displayName": "<NAME>ge", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="d47e2735-bba5-419a-d29e-777fc37d9d6c" from sklearn import svm # Create a SVM regressor with linear kernel clfSVM = svm.SVR(kernel='linear') clfSVM.fit(X_train, Y_train) # + colab={"base_uri": "https://localhost:8080/", 
"height": 936} id="N5RolsnUnG_v" executionInfo={"status": "ok", "timestamp": 1622347345894, "user_tz": 240, "elapsed": 8406, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="3244dc17-b2a9-4477-86e7-7409892e0c95" modelReg_eval(clfSVM, X_train, Y_train, X_test, Y_test, X_val, Y_val) # + [markdown] id="zY9pFADpcReQ" # ## Train a baseline linear model fitted by minimizing a regularized empirical loss with Stochastic Gradient Descent (SGD) # # # + id="TC0LChZ42I_j" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1622348056925, "user_tz": 240, "elapsed": 224, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="4b933a4d-42c7-4e5f-865d-01dd65c3632c" from sklearn.linear_model import SGDRegressor # Create a linear model fitted by minimizing a regularized empirical loss with Stochastic Gradient Descent (SGD) sgdReg = SGDRegressor() sgdReg.fit(X_train, Y_train) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="as7ynAvAcmOC" executionInfo={"status": "ok", "timestamp": 1622348073167, "user_tz": 240, "elapsed": 944, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgWquGKiHEU5BBAHEWpTKthH9hlPTqxUgOPQshiIQ=s64", "userId": "11055133938004454224"}} outputId="65815a01-1f75-4fd6-dd5f-f6e4051afca2" modelReg_eval(sgdReg, X_train, Y_train, X_test, Y_test, X_val, Y_val) # + id="PUtZtHnuewJs"
07_trainRegression_OfferCompleted.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_amazonei_mxnet_p36 # language: python # name: conda_amazonei_mxnet_p36 # --- # + [markdown] tags=[] # # Explaining Object Detection model with Amazon SageMaker Clarify # In this notebook, we deploy a pre-trained image detection model to showcase how you can use Amazon SagemaMaker Clarify explainability features for Computer Vision, specifically for object detection models including your own ones. # # 1. We first import a model from the Gluon model zoo locally on the notebook, that we then compress and send to S3 # 1. We then use the SageMaker MXNet Serving feature to deploy the model to a managed SageMaker endpoint. It uses the model artifact that we previously loaded to S3. # 1. We query the endpoint and visualize detection results # 1. We explain the predictions of the model using Amazon SageMaker Clarify. # # This notebook can be run with the `conda_python3` Kernel. # # # ## More on Amazon SageMaker Clarify: # # Amazon SageMaker Clarify helps improve your machine learning models by detecting potential bias and helping explain how these models make predictions. The fairness and explainability functionality provided by SageMaker Clarify takes a step towards enabling AWS customers to build trustworthy and understandable machine learning models. The product comes with the tools to help you with the following tasks. # # Measure biases that can occur during each stage of the ML lifecycle (data collection, model training and tuning, and monitoring of ML models deployed for inference). # Generate model governance reports targeting risk and compliance teams and external regulators. # Provide explanations of the data, models, and monitoring used to assess predictions for input containing data of various modalities like numerical data, categorical data, text, and images. 
# Learn more about SageMaker Clarify here: [https://aws.amazon.com/sagemaker/clarify/](https://aws.amazon.com/sagemaker/clarify/). # # # ## More on `Gluon` and `Gluon CV`: # * [Gluon](https://mxnet.incubator.apache.org/api/python/docs/api/gluon/index.html) is the imperative python front-end of the Apache MXNet deep learning framework. Gluon notably features specialized toolkits helping reproducing state-of-the-art architectures: [Gluon-CV](https://gluon-cv.mxnet.io/), [Gluon-NLP](https://gluon-nlp.mxnet.io/), [Gluon-TS](https://gluon-ts.mxnet.io/). Gluon also features a number of excellent end-to-end tutorials mixing science with code such as [D2L.ai](https://classic.d2l.ai/) and [The Straight Dope](https://gluon.mxnet.io/) # * [Gluon-CV](https://gluon-cv.mxnet.io/contents.html) is an efficient computer vision toolkit written on top of `Gluon` and MXNet aiming to make state-of-the-art vision research reproducible. # # **This sample is provided for demonstration purposes, make sure to conduct appropriate testing if derivating this code for your own use-cases!** # # # ## Index: # 1. Test a pre-trained detection model, locally # 1. Instantiate model # 1. Create endpoint and get predictions (optional) # 1. Run Clarify and interpret predictions # - # ! pip install -r requirements.txt # Let's start by installing the latest version of the SageMaker Python SDK, boto, and AWS CLI. # ! 
# !pip install sagemaker botocore boto3 awscli --upgrade

# +
import datetime
import json
import math
import os
import shutil
from subprocess import check_call
import tarfile

from PIL import Image
import numpy as np
from matplotlib import pyplot as plt

import boto3
import botocore
import sagemaker
from sagemaker import get_execution_role
from sagemaker.mxnet.model import MXNetModel

import gluoncv
from gluoncv import model_zoo, data, utils
import mxnet as mx
from mxnet import gluon, image, nd

# +
# Session / client handles and the bucket that will hold the model artifacts.
sm_sess = sagemaker.Session()
sm_client = boto3.client("sagemaker")
# The session's default bucket stores the model weights - don't hesitate to change.
s3_bucket = sm_sess.default_bucket()
print(f"using bucket {s3_bucket}")

# Execution role when running inside a SageMaker notebook.
sm_role = sagemaker.get_execution_role()
# Override the role if you are executing locally:
# sm_role = "arn:aws:iam::<account>:role/service-role/AmazonSageMaker-ExecutionRole"
# -

# Constants
TEST_IMAGE_DIR = "caltech"  # directory with test images
MODEL_NAME = "yolo3_darknet53_coco"
S3_KEY_PREFIX = "clarify_object_detection"  # S3 key prefix for model artifacts
ENDPOINT_INSTANCE_TYPE = "ml.g4dn.xlarge"
ANALYZER_INSTANCE_TYPE = "ml.c5.xlarge"
ANALYZER_INSTANCE_COUNT = 1


# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def gen_unique_name(model_name: str):
    """Return a name unique to this user / host combination.

    The user and host names are hashed (first 8 hex chars of SHA-256) and
    appended to *model_name*; '_' and '.' are then normalized away so the
    result is a valid SageMaker resource name.
    """
    import getpass
    import hashlib
    import socket

    fingerprint = hashlib.sha256()
    fingerprint.update(getpass.getuser().encode())
    fingerprint.update(socket.gethostname().encode())
    unique = f"{model_name}-{fingerprint.hexdigest()[:8]}"
    return unique.replace("_", "-").replace(".", "")
# -

# ## Test a pre-trained detection model, locally
# [Gluon model zoo](https://cv.gluon.ai/model_zoo/index.html) contains a variety of models.
# In this demo we use a YoloV3 detection model (Redmon et Farhadi).
# More about YoloV3:
# * Paper https://pjreddie.com/media/files/papers/YOLOv3.pdf
# * Website https://pjreddie.com/darknet/yolo/
#
# Gluon CV model zoo contains a number of architectures with different tradeoffs in
# terms of speed and accuracy. If you are looking for speed or accuracy, don't
# hesitate to change the model

net = model_zoo.get_model(MODEL_NAME, pretrained=True)

# The model we downloaded above is trained on the COCO dataset and can detect 80
# classes. In this demo, we restrict the model to detect only specific classes of
# interest. This idea is derived from the official Gluon CV tutorial:
# https://gluon-cv.mxnet.io/build/examples_detection/skip_fintune.html

# COCO contains the following classes:
print("coco classes: ", sorted(net.classes))

# in this demo we reset the detector to the following classes
classes = ["dog", "elephant", "zebra", "bear"]
net.reset_class(classes=classes, reuse_weights=classes)
print("new classes: ", net.classes)

net.hybridize()  # hybridize to optimize computation

# Get RGB images from the Caltech 256 dataset `[Griffin, <NAME>, <NAME>, P. The
# Caltech 256. Caltech Technical Report.]`

# +
import urllib.request
import os

list_of_images = [
    "009.bear/009_0001.jpg",
    "009.bear/009_0002.jpg",
    "056.dog/056_0023.jpg",
    "056.dog/056_0001.jpg",
    "064.elephant-101/064_0003.jpg",
    "064.elephant-101/064_0004.jpg",
    "064.elephant-101/064_0006.jpg",
    "250.zebra/250_0001.jpg",
    "250.zebra/250_0002.jpg",
]
source_url = "https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/caltech-256/256_ObjectCategories/"

# Fetch each sample image into TEST_IMAGE_DIR, flattening the category
# sub-directories into the file name.
if not os.path.exists(TEST_IMAGE_DIR):
    os.makedirs(TEST_IMAGE_DIR)
for image_name in list_of_images:
    url = source_url + image_name
    file_name = TEST_IMAGE_DIR + "/" + image_name.replace("/", "_")
    urllib.request.urlretrieve(url, file_name)
# -

# Test locally

# +
import glob

test_images = glob.glob(f"{TEST_IMAGE_DIR}/*.jpg")
test_images

# + [markdown] tags=[]
# `gluoncv` comes with built-in pre-processing logic for popular detectors, including YoloV3:
#
# https://gluon-cv.mxnet.io/_modules/gluoncv/data/transforms/presets/yolo.html
#
# https://gluon-cv.mxnet.io/build/examples_detection/demo_yolo.html
# -

# Let's see how the network computes detections in a single image; we have to first
# resize and reshape, since the original image is loaded with channels in the last
# dimension and MXNet will expect a shape of (num_batches, channels, width, height)
transformed_image, _ = data.transforms.presets.yolo.transform_test(image.imread(test_images[-1]))

# The network returns 3 tensors: class_ids, scores and bounding boxes. The default is
# up to 100 detections, so we get tensors of shape (num batches, detections, ...)
# where the last dimension is 4 for the bounding boxes, holding the upper-left and
# lower-right corner coordinates.
(cids, scores, bboxs) = net(transformed_image)

cids.shape

scores.shape

bboxs.shape

bboxs[:, 0, :]

# Plot every test image on a fixed 3-column grid with its detections.
n_pics = len(test_images)
n_cols = 3
n_rows = max(math.ceil(n_pics / n_cols), 2)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(15, 15))
[ax.axis("off") for ax_dim in axes for ax in ax_dim]
for i, pic in enumerate(test_images):
    curr_col = i % n_cols
    curr_row = i // n_cols
    # load and pre-process image
    print(pic)
    im_array = image.imread(pic)
    x, orig_img = data.transforms.presets.yolo.transform_test(im_array)
    # forward pass and display
    box_ids, scores, bboxes = net(x)
    ax = utils.viz.plot_bbox(
        orig_img,
        bboxes[0],
        scores[0],
        box_ids[0],
        class_names=classes,
        thresh=0.9,
        ax=axes[curr_row, curr_col],
    )
    ax.axis("off")
    ax.set_title(pic, pad=15)
fig.tight_layout()
fig.show();

# ## Deploy the detection server
# 1. We first need to **send the model to S3**, as we will provide the S3 model path
#    to the Amazon SageMaker endpoint creation API
# 1. We create a **serving script** containing model deserialization code and
#    inference logic. This logic is in the `repo` folder.
# 1. We **deploy the endpoint** with a SageMaker SDK call

# ### Save local model, compress and send to S3

# Clarify needs a model since it will spin up its own inference endpoint to get
# explanations. We will now export the local model, archive it and then create a
# **SageMaker model** from this archive, which allows creating other resources that
# depend on this model.
# Save the full local model (both the weights and the symbolic graph).
net.export(MODEL_NAME, epoch=0)

# Compress the exported artifacts into a tar file.
# Using the TarFile as a context manager guarantees the archive is closed
# (and fully flushed) even if adding a member raises.
model_file = "model.tar.gz"
with tarfile.open(model_file, "w:gz") as tar:
    tar.add(f"{MODEL_NAME}-symbol.json")
    tar.add(f"{MODEL_NAME}-0000.params")

# Upload to s3; SageMaker model creation needs an S3 URI for the artifact.
model_data_s3_uri = sm_sess.upload_data(model_file, key_prefix=S3_KEY_PREFIX)
model_data_s3_uri

# ## Instantiate model
# We use batching of images on the predictor entry_point in order to achieve higher
# performance as utilization of resources is better than one image at a time.

# +
model = MXNetModel(
    model_data=model_data_s3_uri,
    role=sm_role,
    py_version="py37",
    entry_point="detection_server_batch.py",
    source_dir="repo",
    framework_version="1.8.0",
    sagemaker_session=sm_sess,
)

container_def = model.prepare_container_def(instance_type=ENDPOINT_INSTANCE_TYPE)
model_name = gen_unique_name(MODEL_NAME)
sm_sess.create_model(role=sm_role, name=model_name, container_defs=[container_def])
# -

# ## (Optional) Create endpoint and get predictions, model IO in depth
# In this optional section we deploy an endpoint to get predictions and dive deep into
# details that can be helpful to troubleshoot issues related to the expected model IO
# format of predictions, serialization and tensor shapes.
#
# Common pitfalls are usually solved by making sure we are using the right serializer
# and deserializer and that the model output conforms to the expectations of Clarify
# in terms of shapes and semantics of the output tensors.
# In general, Clarify expects that our model receives a batch of images and outputs a
# batch of image detections with a tensor having the following elements: **class id,
# prediction score and normalized bounding box of the detection.**

endpoint_name = gen_unique_name(MODEL_NAME)
endpoint_name

# Delete any previous endpoint (best effort). Catching only ClientError instead of
# the previous bare `except` keeps the cleanup quiet on "does not exist" while still
# surfacing real programming errors.
try:
    sm_sess.delete_endpoint(endpoint_name)
except botocore.exceptions.ClientError:
    pass

# Delete any stale endpoint config (best effort as well).
try:
    sm_sess.delete_endpoint_config(endpoint_name)
except botocore.exceptions.ClientError as e:
    print(e)

# Deploy the model in a SageMaker endpoint

# +
import sagemaker.serializers
import sagemaker.deserializers

print(model.name)
predictor = model.deploy(
    initial_instance_count=1,
    instance_type=ENDPOINT_INSTANCE_TYPE,
    endpoint_name=endpoint_name,
    serializer=sagemaker.serializers.NumpySerializer(),
    deserializer=sagemaker.deserializers.JSONDeserializer(),
)
# -

predictor.deserializer

predictor.serializer

predictor.accept

# Let's go in detail on how the detection server works, let's take the following test image as an example:
im = Image.open(test_images[0])
im

# Since we overrode the `transform_fn` making it support batches and normalizing the
# detection boxes, we feed a tensor with a single batch, H, W and the 3 color channels
# as input
im_np = np.array([np.asarray(im)])
im_np.shape

(H, W) = im_np.shape[1:3]
(H, W)

# Send the image to the predictor and get detections
tensor = np.array(predictor.predict(im_np))
tensor

tensor.shape

# Our prediction has one batch, 3 detections and 6 elements containing class_id, score
# and the normalized box as upper-left and lower-right corners.
box_scale = np.array([W, H, W, H])

# To display the detections we undo the normalization and split the detection format
# that Clarify uses, so we can call gluon's plot_bbox with the de-normalized boxes
# and with scores and class ids separated out of the detection rows.
box_scale

numdet = tensor.shape[1]
cids = np.zeros(numdet)
scores = np.zeros(numdet)
bboxes = np.zeros((numdet, 4))
for det_idx, detection in enumerate(tensor[0]):
    cids[det_idx] = detection[0]
    scores[det_idx] = detection[1]
    bboxes[det_idx] = detection[2:]
    bboxes[det_idx] *= box_scale
    bboxes[det_idx] = bboxes[det_idx].astype("int")  # truncate to whole pixels

bboxes

scores

utils.viz.plot_bbox(np.asarray(im), bboxes, scores, cids, class_names=classes, thresh=0.8)


# We can group the logic above in a function to make it more convenient to use
def detect(pic, predictor):
    """Send one picture to *predictor* and return (class_ids, scores, boxes).

    Boxes are scaled back from the normalized detection format to pixel
    coordinates of the input image and truncated to whole pixels.
    """
    rgb = Image.open(pic).convert("RGB")
    batch = np.array([np.asarray(rgb)])
    height, width = batch.shape[1:3]
    prediction = np.array(predictor.predict(batch))

    scale = np.array([width, height, width, height])
    n_det = prediction.shape[1]
    class_ids = np.zeros(n_det)
    conf_scores = np.zeros(n_det)
    boxes = np.zeros((n_det, 4))
    for det_idx, detection in enumerate(prediction[0]):
        class_ids[det_idx] = detection[0]
        conf_scores[det_idx] = detection[1]
        boxes[det_idx] = detection[2:]
        boxes[det_idx] *= scale
        boxes[det_idx] = boxes[det_idx].astype("int")
    return (class_ids, conf_scores, boxes)


# %%time
pic = test_images[0]
cids, scores, bboxes = detect(pic, predictor)
cids

bboxes

# for local viz we need to resize the local pic the same way the server does
_, orig_img = data.transforms.presets.yolo.load_test(pic)
utils.viz.plot_bbox(orig_img, bboxes, scores, cids, class_names=classes, thresh=0.9)

cids

# There's a single detection of a dog which is class index 0 as in the beginning of
# the notebook where we called `reset_class`

# ## Amazon Sagemaker Clarify
#
# We will now showcase how to use SageMaker Clarify to explain detections by the
# model, for that we have already done some work in `detection_server_batch.py` to
# filter out missing detections with index `-1` and we have normalized the boxes to the image
# dimensions. We only need to upload the data to s3, provide the configuration for
# Clarify in the `analysis_config.json` describing the explainability job parameters
# and execute the processing job with the data and configuration as inputs. As a
# result, we will get in S3 the explanation for the detections of the model.
#
# Clarify expects detections to be in the format explored in the cells above.
# Detections should come in a tensor of shape `(num_images, batch, detections, 6)`.
# The first number of each detection is the predicted class label. The second number
# is the associated confidence score for the detection. The last four numbers
# represent the bounding box coordinates `[xmin / w, ymin / h, xmax / w, ymax / h]`.
# These output bounding box corner indices are normalized by the overall image size
# dimensions, where `w` is the width of the image, and `h` is the height.

# Upload some test images to get explanations
s3_test_images = f"{S3_KEY_PREFIX}/test_images"

# !mkdir -p test_images
# !cp {TEST_IMAGE_DIR}/009.bear_009_0002.jpg test_images
# !cp {TEST_IMAGE_DIR}/064.elephant-101_064_0003.jpg test_images

dataset_uri = sm_sess.upload_data("test_images", key_prefix=s3_test_images)
dataset_uri

# We use this noise image as a baseline to mask different segments of the image
# during the explainability process
baseline_uri = sm_sess.upload_data("noise_rgb.png", key_prefix=S3_KEY_PREFIX)

# It's very important that `predictor.content_type` and `predictor.accept_type` in
# the json fields below match the sagemaker python sdk `predictor.serializer` and
# `predictor.deserializer` class instances above, such as
# `sagemaker.serializers.NumpySerializer`, so the Clarify job can use the right
# (de)serializer.

# ### Clarify job configuration for object detection type of models

# We will configure important parameters of the Clarify job for object detection
# under `image_config`:
#
# * **num_samples**: This number determines the size of the generated synthetic
#   dataset to compute the SHAP values. More samples will produce more accurate
#   explanations but will consume more computational resources
# * **baseline**: image that will be used to mask segments during Kernel SHAP
# * **num_segments**: number of segments to partition the detection image into
# * **max_objects**: maximum number of objects starting from the first that will be
#   considered, sorted by predicted score
# * **iou_threshold**: minimum IOU for considering predictions against the original
#   detections, as detection boxes will shift during masking
# * **context**: whether to mask the image background when running SHAP, takes
#   values 0 or 1
#
# <hr/>
#
# Below we use the
# [Sagemaker Python SDK](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html?highlight=clarify#module-sagemaker.clarify)
# which helps create an
# [Analysis configuration](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-configure-processing-jobs.html)
# but using higher level Python classes.

# + pycharm={"name": "#%%\n"}
from sagemaker.clarify import (
    SageMakerClarifyProcessor,
    ModelConfig,
    DataConfig,
    SHAPConfig,
    ImageConfig,
    ModelPredictedLabelConfig,
)
from sagemaker.utils import unique_name_from_base
# -

# Configure parameters of the Clarify Processing job. The job has one input, the
# config file, and one output, the resulting analysis of the model.

# +
analyzer_instance_count = 1
analyzer_instance_type = "ml.c5.xlarge"
output_bucket = sm_sess.default_bucket()

# Here we specify where to store the results.
analysis_result_path = f"s3://{output_bucket}/{S3_KEY_PREFIX}/cv_analysis_result"

# Processor that runs the Clarify analysis job.
clarify_processor = SageMakerClarifyProcessor(
    role=sm_role,
    instance_count=analyzer_instance_count,
    instance_type=analyzer_instance_type,
    max_runtime_in_seconds=3600,
    sagemaker_session=sm_sess,
)

# Shadow endpoint Clarify spins up to query the model.
model_config = ModelConfig(
    model_name=model_name,
    instance_count=1,
    instance_type=ENDPOINT_INSTANCE_TYPE,
    content_type="application/x-npy",
)

# Where the input images live and where the analysis lands.
data_config = DataConfig(
    s3_data_input_path=dataset_uri,
    s3_output_path=analysis_result_path,
    dataset_type="application/x-image",
)

# Object-detection-specific knobs (segmentation, IOU matching, context masking).
image_config = ImageConfig(
    model_type="OBJECT_DETECTION",
    feature_extraction_method="segmentation",
    num_segments=20,
    segment_compactness=5,
    max_objects=5,
    iou_threshold=0.5,
    context=1.0,
)

# Kernel SHAP settings: the noise baseline and the synthetic-sample budget.
shap_config = SHAPConfig(
    baseline=baseline_uri,
    num_samples=500,
    image_config=image_config,
)

predictions_config = ModelPredictedLabelConfig(probability_threshold=0.8, label_headers=net.classes)
# -

# Now run the processing job, it will take approximately 6 minutes.
clarify_processor.run_explainability(
    data_config=data_config,
    model_config=model_config,
    model_scores=predictions_config,
    explainability_config=shap_config,
    job_name=unique_name_from_base("clarify-cv-object-detection"),
    wait=True,
)

# We download the results of the Clarify job and inspect the attributions

# !aws s3 cp --recursive {analysis_result_path} cv_analysis_result

im = Image.open("cv_analysis_result/shap_064.elephant-101_064_0003_box1.jpeg")
im

im = Image.open("cv_analysis_result/shap_064.elephant-101_064_0003_box2.jpeg")
im

im = Image.open("cv_analysis_result/064.elephant-101_064_0003_objects.jpeg")
im

# ### Cleanup of resources
# We delete the previous endpoint
sm_sess.delete_endpoint(endpoint_name)
sagemaker-clarify/computer_vision/object_detection/object_detection_clarify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lecture 13: Other programming languages # [Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2020) # # [<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2020/master?urlpath=lab/tree/13/Other_programming_languages.ipynb) # # (under construction)
web/13/Other_programming_languages.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Preface

# In this blog post we fit a _naive Bayes_ model to predict games _critic ratings_
# given their genres. Data are scraped from
# [best metacritic ps4 games](https://www.metacritic.com/browse/games/score/metascore/year/ps4/filtered?year_selected=2019)
# through a [scraper of mine](https://github.com/mostafatouny/data-scraper/). We do
# not claim a result of value. An introductory recommender system course yields
# results much more accurate and reliable than the approach presented here. Why am I
# doing this then? As machine learning today is overwhelmingly hyped, it is nice to
# have a facet of it in my portfolio. Jump directly to [discussion](#discussion) if
# you are not concerned with coding details.

# ___

# ## Table of Contents

# Intro
# - [Preface](#preface)
#
# Data Preprocessing
# - [Data Cleansing](#data-cleansing)
# - [Discretize Critic Rating](#discretize-critic-rating)
# - [Obtain Unique Series of Genres](#obtain-unique-series-of-genres)
# - [Create Column For Each Genre. Its Value Corresponds To Whether It is in Game's Genres](#create-column-For-each-genre-its-value-corresponds-to-whether-it-is-in-Games-Genres)
#
# Applying Machine Learning
# - [Naive Gaussian Bayes](#naive-gaussian-bayes)
# - [Predicting Upcoming Games](#predicting-upcoming-games)
# - [Discussion](#discussion)

# ___

# ### Import Libraries and Local Files

# 3rd-party libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# local-files
import jsonRW as jsRW
import discretizeIntoCategories as discIntCat

# ___

# ## Data Cleansing

# ### Read Data

# Read the scraped dataset from its local json file ...
metacritic_json = jsRW.readJson('metacritic2019ps4_data')

# ... and parse it into a pandas dataframe.
df = pd.DataFrame(metacritic_json)
df

# ___

# ### Drop Irrelevant Columns

# Only the genres and the critic rating matter for this experiment.
df = df.drop(['developer', 'name', 'publisher', 'release_data', 'users_rating'], axis=1)
df

# ### Critic Rating Data Type To Integer

df.dtypes

df['critic_rating'] = pd.to_numeric(df['critic_rating'])

df.dtypes

# ___

# ### Discretize Critic Rating

# Labels assigned to ratings as they fall within certain ranges ...
categories = pd.Series(["very_low", "low", "moderate", "high", "very_high"])
# ... and the lower bounds of the critic-rating ranges each label maps to.
intervals_categories = [0, 20, 40, 60, 80]

# Map every rating onto its label according to the ranges above.
df['category'] = df.apply(discIntCat.numToCat, axis=1,
                          args=('critic_rating', categories, intervals_categories))

# Let pandas recognize the labels as a categorical column ...
df['category'] = df['category'].astype("category")
# ... and make that categorical ordered.
df['category'] = df['category'].cat.set_categories(categories, ordered=True)
df

# ___

# ### Drop Critic Rating

df = df.drop(['critic_rating'], axis=1)
df

# ___

# ### Obtain Unique Series of Genres

sr_genres = df['genres']
sr_genres

# Concatenate the per-game genre lists, then de-duplicate.
unique_genres = np.unique(np.concatenate(sr_genres, axis=0))
unique_genres
# ___

# ### Remove Spaces, Slashes and Dashes From Genres Names

# Single-pass translation table: spaces and dashes map to underscores and slashes
# are deleted. str.translate does in one C-level pass what three chained
# .replace calls did before, with identical results for these characters.
_GENRE_TRANSLATION = str.maketrans({' ': '_', '-': '_', '/': None})


def underscoreCleaner(strLis_in):
    """Return *strLis_in* with ' ' and '-' turned into '_' and '/' removed."""
    return strLis_in.translate(_GENRE_TRANSLATION)


temLis = pd.Series(unique_genres)
# Normalize every genre name so it can later be used as a column label.
temLis = temLis.apply(underscoreCleaner)
cleanedUniqueGenres = temLis
cleanedUniqueGenres

# ___

# ### Create Column For Each Genre. Its Value Corresponds To Whether It is in Game's Genres


def isGenreIn(row_in, column_in, genresList_in):
    """Map one game row to a boolean Series over all unique genres.

    Parameters: *row_in* is a dataframe row whose *column_in* entry holds the
    game's own genre list; *genresList_in* is the full collection of unique
    (cleaned) genre names. Returns a boolean Series with one entry per unique
    genre, True when that genre appears in the game's list.
    """
    # game's genres list
    row_value = pd.Series(row_in[column_in])
    # all unique genres
    genresSer = pd.Series(genresList_in)
    # membership of each unique genre in the game's own list
    return genresSer.isin(row_value)


# One boolean column per unique genre.
# NOTE(review): the CLEANED names are matched against the RAW df['genres'] values;
# a genre whose raw form contains spaces/slashes/dashes could never match. Confirm
# the scraper already emits cleaned names, otherwise those columns stay all-False.
genresAsColumns = df.apply(isGenreIn, axis=1, args=('genres', cleanedUniqueGenres))
# rename columns to all unique genres
genresAsColumns.columns = cleanedUniqueGenres
genresAsColumns

# map boolean values to integers.
# True to 1 and False to 0
genresAsColumns = genresAsColumns.astype(int)
genresAsColumns

# ___

# ### Map Classes To Their Corresponding Numeric Indices

# Index-aligned class names and the reverse mapping from name to index.
classesArr = np.array(["very_low", "low", "moderate", "high", "very_high"])
catToIndDict = {"very_low": 0, "low": 1, "moderate": 2, "high": 3, "very_high": 4}


def catToInd(str_in):
    """Return the numeric index of the class name *str_in*."""
    return catToIndDict[str_in]


classes = df['category']
classes

# Replace class names by their numeric indices (no lambda wrapper needed).
classes = classes.apply(catToInd)
classes

# ___

# ## Naive Gaussian Bayes

# ### Convert X and Y To Numpy Arrays

genresAsColumns = genresAsColumns.to_numpy()
classes = classes.to_numpy()

genresAsColumns

classes

# ### Split Data For Testing and Training

X_train, X_test, y_train, y_test = train_test_split(genresAsColumns, classes,
                                                    test_size=0.3, random_state=0)

# ### Fit Model

# initialize a gaussian model
gnb = GaussianNB()
# fit the model on games genres and classes
clf = gnb.fit(X_train, y_train)

# ### Predicting Test Data

y_pred = clf.predict(X_test)

# ### Accuracy

# Number of correctly labeled points out of total, respectively
correctlyLabeledNum = (y_test == y_pred).sum()
totalPointsNum = X_test.shape[0]
print("Number of Correctly Labeled: ", correctlyLabeledNum)
print("Number of Total Points: ", totalPointsNum)
print("Accuracy Percentage: ", correctlyLabeledNum/totalPointsNum)

# ___

# ## Predicting Upcoming Games

# ### A Mapper From A Series Element To Its Corresponding Index


def indexFromName(series_in, str_in):
    """Return the index of the first element of *series_in* equal to *str_in*."""
    return series_in[series_in == str_in].index[0]


indexFromName(cleanedUniqueGenres, '2D')

# ### Construct Array of Indices From Strings


def arrayFromNames(series_in, strLis_in):
    """Encode *strLis_in* as an indicator vector over the genres in *series_in*.

    The vector length is derived from *series_in* instead of the previously
    hard-coded 79, so the encoder keeps working when the genre vocabulary
    changes size.
    """
    gameGenres = np.zeros(len(series_in), dtype=int)
    # flag every requested genre
    for name in strLis_in:
        gameGenres[indexFromName(series_in, name)] = 1
    return gameGenres


# ### Predict Class Name From Genres Strings


def predictClassFromGenres(strLis_in):
    """Predict the rating class name for a game described by its genre names."""
    # map genres strings to their corresponding indicator vector
    gameGenreArray = arrayFromNames(cleanedUniqueGenres, strLis_in)
    # prediction of class numeric value
    gamePred = clf.predict([gameGenreArray])
    # return predicted class name
    return classesArr[gamePred[0]]


# ___

# ### Doom Eternal

# ![doom-eternal](./doom-eternal.png)

predictClassFromGenres(['Action', 'Shooter', 'First_Person', 'Arcade'])

# ### Control: The Foundation

# ![control-the-foundation](./control-the-foundation.png)

predictClassFromGenres(['Action_Adventure', 'General'])

# ### Resident Evil 3

# ![resident-evil-3](./resident-evil-3.png)

predictClassFromGenres(['Action_Adventure', 'Survival'])

# ___

# ### Discussion

# Let's now analyze the embarrassing results I reached. A realization of ignorance is
# not as bad as an ignorance of being ignorant. For the latter case, there is no
# chance for remediation, but for the former, I am skeptical of guaranteed chances.
#
# **There is no pattern to be fitted**. I have seen plenty of computer science
# students who just care about machine learning models and give no interest to the
# data itself! That is exactly like claiming astronomy is all about telescopes. In
# fact, data science is all based on our understanding of real-life data and whether
# we could discover and verify patterns found in them. Machine learning models are a
# toolbox for the data scientist so that he could reveal insights in data, but they
# are not his principal goal. Regarding our case in this blog post, it is well-known
# that genres are not indicators of a game's quality at all. If the data contains no
# pattern, then the hypothesized pattern shall not emerge from whatever model you
# apply. I would doubt myself in case the model reached a high accuracy rate.
#
# **Feature vectors are ridiculously simplifying the item**. Two _action-adventure_
# _3rd-person_ games probably have totally different playing-style/theme.
# Simplifying games by their genres is like describing a student's skill set by his
# faculty. Is graduating from a CS major an indicator of a student's skills? He might
# be either a lazy or a dedicated student. A curious and challenging inquiry arises
# here. How do we represent aesthetics in terms of numbers? How do we objectively
# measure a game's degree of fun? Is it even possible for science to someday reach
# objective measures of human feelings? The only aspect I am sure of is that no one
# is sure of the answers to these questions (sounds like a self-contradictory
# statement, right?)
#
# Finally, note that _naive Bayes_ is based on the assumption that features are
# independent from each other, which is not the case here. _Action_ games are more
# likely to be _adventure_, for instance.
content/post/first-machine-learning/index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Using ms2deepscore: How to load data, train a model, and compute similarities.

# +
from pathlib import Path

from matchms.importing import load_from_mgf
from tensorflow import keras
import pandas as pd

from ms2deepscore import SpectrumBinner
from ms2deepscore.data_generators import DataGeneratorAllSpectrums
from ms2deepscore.models import SiameseModel
from ms2deepscore import MS2DeepScore
# -

# ## Data loading
# Here we load in a small sample of test spectra as well as reference scores data.

# Test fixtures shipped with the package's test suite.
TEST_RESOURCES_PATH = Path.cwd().parent / 'tests' / 'resources'
spectrums_filepath = str(TEST_RESOURCES_PATH / "pesticides_processed.mgf")
score_filepath = str(TEST_RESOURCES_PATH / "pesticides_tanimoto_scores.json")

# Load processed spectrums from the .mgf file. For the processing itself see the
# [matchms](https://github.com/matchms/matchms) documentation.
spectrums = list(load_from_mgf(spectrums_filepath))

# Load reference scores from a .json file. This is a Pandas DataFrame with reference
# similarity scores (=labels) for compounds identified by inchikeys. Columns and
# index should be inchikeys, the value in a row x column depicting the similarity
# score for that pair. Must be symmetric
# (reference_scores_df[i,j] == reference_scores_df[j,i]) and column names should be
# identical to the index.
tanimoto_scores_df = pd.read_json(score_filepath)

# ## Data preprocessing
# Bin the spectrums using `ms2deepscore.SpectrumBinner`. In this binned form we can
# feed spectra to the model.

spectrum_binner = SpectrumBinner(1000, mz_min=10.0, mz_max=1000.0, peak_scaling=0.5)
binned_spectrums = spectrum_binner.fit_transform(spectrums)

# Create a data generator that will generate batches of training examples.
# Each training example consists of a pair of binned spectra and the corresponding reference similarity score.
dimension = len(spectrum_binner.known_bins)
data_generator = DataGeneratorAllSpectrums(binned_spectrums, tanimoto_scores_df, dim=dimension)

# ## Model training

# Initialize a SiameseModel. It consists of a dense 'base' network that produces an
# embedding for each of the 2 inputs. The 'head' model computes the cosine similarity
# between the embeddings.
model = SiameseModel(spectrum_binner, base_dims=(200, 200, 200), embedding_dim=200, dropout_rate=0.2)
# `learning_rate` replaces the deprecated `lr` alias (removed in recent Keras
# releases); the optimizer configuration itself is unchanged.
model.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=0.001))
model.summary()

# Train the model on the data; for the sake of simplicity we use the same dataset for
# training and validation.
model.fit(data_generator, validation_data=data_generator, epochs=2)

# ## Model inference

# Calculate similarities for a pair of spectra
similarity_measure = MS2DeepScore(model)
score = similarity_measure.pair(spectrums[0], spectrums[1])
print(score)

# Calculate similarities for a 3x3 matrix of spectra
scores = similarity_measure.matrix(spectrums[:3], spectrums[:3])
print(scores)
notebooks/MS2DeepScore_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import ipywidgets as widgets from IPython.display import display, HTML javascript_functions = {False: "hide()", True: "show()"} button_descriptions = {False: "Show code", True: "Hide code"} def toggle_code(state): """ Toggles the JavaScript show()/hide() function on the div.input element. """ output_string = "<script>$(\"div.input\").{}</script>" output_args = (javascript_functions[state],) output = output_string.format(*output_args) display(HTML(output)) def button_action(value): """ Calls the toggle_code function and updates the button description. """ state = value.new toggle_code(state) value.owner.description = button_descriptions[state] state = False #toggle_code(state) button = widgets.ToggleButton(state, description = button_descriptions[state]) button.observe(button_action, "value") display(button) # - from windows_widgets import CompileOutputOnly, CompileInputOuput, ShortAnswerQuestion, ChoiceQuestion, AchieveRate, add_link_buttons # # Chapter 4 - 반복문 # ## 4.2 while 문 # 문장 while (cond) stmt;는 반복 조건인 cond를 평가하여 0(거짓)이면 while 문을 종료하며, 0이 아니면(참) 반복 몸체인 stmt를 실행하고 다시 반복 조건 cond를 평가하여 while 문 종료 시까지 반복한다. 반복 몸체인 stmt는 필요하면 블록으로 구성할 수 있다. while 문은 for 문보다 간편하며 모든 반복 기능을 수행할 수 있다. co1 = CompileOutputOnly('exer3') # 다음 예제는 for 문으로 무한 반복을 하며 1에서 20 사이 정수 또는 0을 입력받는다. 0을 입력받으면 break 문을 이용하여 for 문을 빠져나와 프로그램이 종료된다. 1 에서 20 사이의 정수를 입력받으면 그 수까지 곱을 구하여 출력한다. 
cio1 = CompileInputOuput('exer4') # ## 연습문제 saq1 = ShortAnswerQuestion('(1) ___문은 조건에 따른 선택을 지원하는 구문이다.', ['조건'], ' 조건문에 대한 설명이다.', ' if문, switch문이 조건문에 해당한다.') saq2 = ShortAnswerQuestion("""반복 내부에서 반복을 종료하려면 ____ 문장을 사용한다.""", ['break'], ' break 키워드가 쓰인다.', ' break는 반복을 즉각 종료할 때 사용한다.') cq1 = ChoiceQuestion("""다음 중에서 조건 선택에 해당되지 않는 제어문은 무엇인가?""", ['if', 'if else', 'switch', 'break'], 3, ' if, if else, switch 모두 조건을 제어하는 데 사용된다.', ' break는 반복을 종료할 때 사용한다.') rate = AchieveRate() add_link_buttons(0, 'sample_windows1.ipynb', 'sample_windows3.ipynb')
.ipynb_checkpoints/sample_windows2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3-ModelFitting/W1D3_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="sSukjRujSzSt" # # Neuromatch Academy: Week 1, Day 3, Tutorial 4 # # Model Fitting: Multiple linear regression # + [markdown] colab_type="text" id="it1CnPC-85R_" # #Tutorial Objectives # # This is Tutorial 4 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of linear models by generalizing to multiple linear regression (Tutorial 4). We then move on to polynomial regression (Tutorial 5). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 6) and two common methods for model selection, AIC and Cross Validation (Tutorial 7). # # In this tutorial, we will generalize our linear model to incorporate multiple linear features. 
# - Learn how to structure our inputs for multiple linear regression using the 'Design Matrix' # - Generalize the MSE for multiple features using the ordinary least squares estimator # - Visualize our data and model fit in multiple dimensions # + [markdown] colab_type="text" id="MjRqxf9FloZk" # # Setup # + cellView="form" colab={} colab_type="code" id="7lFaCKBThezP" # @title Imports import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # + cellView="form" colab={} colab_type="code" id="BBuWjOPHXx-v" #@title Figure Settings # %matplotlib inline fig_w, fig_h = (8, 6) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) # %config InlineBackend.figure_format = 'retina' # + [markdown] colab_type="text" id="JYHaWE5h0LlJ" # # Multiple Linear Regression # # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="wDY1rFtEX3ac" outputId="f7cdc296-535a-49e8-a1a2-4b4640bcee41" #@title Video: Multiple Linear Regression from IPython.display import YouTubeVideo video = YouTubeVideo(id="uQjKnlhGEVY", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="-MyWwEYwX3zG" # Now that we have considered the univariate case and how to produce confidence intervals for our estimator, we turn now to the general linear regression case, where we can have more than one regressor, or feature, in our input. # # Recall that our original univariate linear model was given as # # \begin{align} # y = \theta_0 + \theta_1 x # \end{align} # # where $\theta_0$ is the intercept, $\theta_1$ is the slope. We can easily extend this to the multivariate scenario by adding another parameter for each additional feature # # \begin{align} # y = \theta_0 + \theta_1 x_1 + \theta_1 x_2 + ... +\theta_d x_d # \end{align} # # where $d$ is the dimensionality (number of features) in our input. 
# # We can condense this succinctly using vector notation for a single data point # # \begin{align} # y_i = \boldsymbol{\theta}^{\top}\mathbf{x}_i # \end{align} # # and fully in matrix form # # \begin{align} # \mathbf{y} = \mathbf{X}\boldsymbol{\theta} # \end{align} # # where $\mathbf{y}$ is a vector of measurements, $\mathbf{X}$ is a matrix containing the feature values (columns) for each input sample (rows), and $\boldsymbol{\theta}$ is our parameter vector. # # This matrix $\mathbf{X}$ is often referred to as the "[design matrix](https://en.wikipedia.org/wiki/Design_matrix)". # + [markdown] colab_type="text" id="CBx3LluxQSWY" # For this tutorial we will focus on the two-dimensional case ($d=2$), which allows us to fully explore the multivariate case while still easily visualizing our results. As an example, think of a situation where a scientist records the spiking response of a retinal ganglion cell to patterns of light signals that vary in contrast and in orientation. # # In this case our model can be writen as # # \begin{align} # y = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \epsilon # \end{align} # # or in matrix form where # # \begin{align} # \mathbf{X} = # \begin{bmatrix} # 1 & x_{1,1} & x_{1,2} \\ # 1 & x_{2,1} & x_{2,2} \\ # \vdots & \vdots & \vdots \\ # 1 & x_{n,1} & x_{n,2} # \end{bmatrix}, # \boldsymbol{\theta} = # \begin{bmatrix} # \theta_0 \\ # \theta_1 \\ # \theta_2 \\ # \end{bmatrix} # \end{align} # # For our actual exploration dataset we shall set $\boldsymbol{\theta}=[0, -2, -3]$ and draw $N=40$ noisy samples from $x \in [-2,2)$. Note that setting the value of $\theta_0 = 0$ effectively ignores the offset term. 
# + cellView="both" colab={} colab_type="code" id="3eFkROv0odWA" np.random.seed(121) theta = [0, -2, -3] n_samples = 40 n_regressors = len(theta) x = np.random.uniform(-2, 2, (n_samples, n_regressors)) noise = np.random.randn(n_samples) y = x @ theta + noise # + [markdown] colab_type="text" id="vUxoyKRpShGH" # Now that we have our dataset, we want to find an optimal vector of paramters $\boldsymbol{\hat\theta}$. Recall our analytic solution to minimizing MSE for a single regressor: # # \begin{align} # \hat\theta = \sum_i \frac{x_i y_i}{x_i^2}. # \end{align} # # The same holds true for the multiple regressor case, only now expressed in matrix form # # \begin{align} # \boldsymbol{\hat\theta} = (\mathbf{X}^\top\mathbf{X})^{-1}\mathbf{X}^\top\mathbf{y}. # \end{align} # # This is called the [ordinary least squares](https://en.wikipedia.org/wiki/Ordinary_least_squares) (OLS) estimator. # + [markdown] colab_type="text" id="py0CbmHI3eVS" # ### Exercise: Ordinary Least Squares Estimator # # In this exercise you will implement the OLS approach to estimating $\boldsymbol{\hat\theta}$ from the design matrix $\mathbf{X}$ and measurement vector $\mathbf{y}$. You can the `@` symbol for matrix multiplication, `.T` for transpose, and `np.linalg.inv` for matrix inversion. # # + colab={} colab_type="code" id="0AdBqizy0s3o" def ordinary_least_squares(x, y): """Ordinary least squares estimator for linear regression. 
Args: x (ndarray): design matrix of shape (n_samples, n_regressors) y (ndarray): vector of measurements of shape (n_samples) Returns: ndarray: estimated parameter values of shape (n_regressors) """ ###################################################################### ## TODO for students: solve for the optimal parameter vector using OLS ###################################################################### # comment this out when you've filled raise NotImplementedError("Student excercise: solve for theta_hat vector using OLS") return theta_hat # + cellView="both" colab={} colab_type="code" id="n7wJNG-nXuEn" # to_remove solution def ordinary_least_squares(x, y): """Ordinary least squares estimator for linear regression. Args: x (ndarray): design matrix of shape (n_samples, n_regressors) y (ndarray): vector of measurements of shape (n_samples) Returns: ndarray: estimated parameter values of shape (n_regressors) """ theta_hat = np.linalg.inv(x.T @ x) @ x.T @ y return theta_hat # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="-la11_Uu00Jf" outputId="868d60e8-aecf-4655-9f9a-fb79bbd15deb" theta_hat = ordinary_least_squares(x, y) theta_hat # + [markdown] colab_type="text" id="shpxp-hdr6Ah" # Now that we have our $\mathbf{\hat\theta}$, we can obtain $\mathbf{\hat y}$ and thus our mean squared error. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="YNr9zDVPEc54" outputId="52498e8b-fbc5-4f50-d043-033751c15e90" y_hat = x @ theta_hat print(f"MSE = {np.mean((y - y_hat)**2):.2f}") # + [markdown] colab_type="text" id="jsPqRTOG3hPX" # Finally, the following code will plot a geometric visualization of the data points (blue) and fitted plane. You can see that the residuals (green bars) are orthogonal to the hyperplane. 
# + colab={"base_uri": "https://localhost:8080/", "height": 441} colab_type="code" id="Q0qd240a1dzr" outputId="9b78d10d-5734-4ce0-9ee3-b90bc66a0c84" xx, yy = np.mgrid[-2:2:50j, -2:2:50j] y_hat_grid = np.array([xx.flatten(), yy.flatten()]).T @ theta_hat[1:] y_hat_grid = y_hat_grid.reshape((50, 50)) ax = plt.subplot(projection='3d') ax.plot(x[:,1], x[:,2], y, '.') ax.plot_surface(xx, yy, y_hat_grid, linewidth=0, alpha=0.5, color='C1', cmap=plt.get_cmap('coolwarm')) for i in range(len(x)): ax.plot((x[i, 1], x[i, 1]), (x[i, 2], x[i, 2]), (y[i], y_hat[i]), 'g-', alpha=.5) ax.set( xlabel='$x_1$', ylabel='$x_2$', zlabel='y' ) plt.tight_layout() # + [markdown] colab_type="text" id="7ZFV7RCgtNv_" # # Summary # - linear regression generalizes naturally to multiple dimensions # - linear algebra affords us the mathematical tools to reason and solve such problems beyond the two dimensional case # # **NOTE** in practice, multidimensional least squares problems can be solve very efficiently (thanks to numerical routines such as LAPACK). # + colab={} colab_type="code" id="SezkuEUOmCpw"
tutorials/W1D3_ModelFitting/W1D3_Tutorial4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FIFA 19 Data Wrangling and Cleaning # ## External Dataset # An external dataset for the FIFA 19 dataset is not necessary because the dataset contains all the required relevant features. # ## Set Up Python Libraries # + # import necessary libraries # %matplotlib inline import pandas as pd import numpy as np import scipy import matplotlib.pyplot as plt # the more advanced python visualization library import seaborn as sns # apply style to all the charts sns.set_style('whitegrid') # convert scientific notation to decimals pd.set_option('display.float_format', lambda x: '%.2f' % x) # - # ## Load the Data # + df_fifa = pd.read_csv('raw data/fifa19data.csv') df_fifa.rename(columns={'Unnamed: 0':'Index'}, inplace=True) df_fifa # - # ## Summarizing Data for Inspection print('FIFA 19 Player Data') print(df_fifa.columns) print(df_fifa.head()) print(df_fifa.describe()) # ## Removing Columns # Column names in the dataset are correct but columns that should be removed exist. Columns from the dataset that are predominantly empty due to applicability reasons or are irrelevant to our analysis are removed. Thus, the columns from the dataset that are removed are: # - Index (remove prior to writing clean dataset to csv file) # - ID # - Photo # - Flag # - Club Logo # - Body Type # - Real Face # - Jersey Number # - Joined # - Loaned From # - Contract Valid Until # - Release Clause # + data_clean=df_fifa.drop(['ID', 'Photo', 'Flag', 'Club Logo', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause'], axis=1) data_clean # - # ## Converting Data Types # Height is converted to a single float value from the feet'inches string in the dataframe column using the formula feet x 12 + inches. 
data_clean['Height'] = data_clean['Height'].str.split("'").str[0].astype(float)*12 + data_clean['Height'].str.split("'").str[1].astype(float) print(data_clean['Height']) # Weight is converted to an integer from a string and the string 'lbs' is removed. # Symbols are removed from each entry in the Value and Wage columns. Value is converted to float from string while Wage is converted to an integer from a string. # + # Weight data_clean['Weight'] = data_clean['Weight'].str.replace('lbs', '').astype(float) # Value data_clean['Value'] = data_clean['Value'].str.replace('€', '') zero_value = data_clean[data_clean['Value'].str[0] == '0'] zero_value = zero_value['Value'].astype(float) m = data_clean[data_clean['Value'].str[-1] == 'M'] m = m['Value'].str.replace('M', '').astype(float) k = data_clean[data_clean['Value'].str[-1] == 'K'] # those with values in the thousands (K) rather than in the millions (M), divide by 1000 to put quantities on same scale k = k['Value'].str.replace('K', '').astype(float)/1000 data_clean['Value'] = pd.concat([m, k, zero_value]) # Wage data_clean['Wage'] = data_clean['Wage'].str.replace('€', '') data_clean['Wage'] = data_clean['Wage'].str.replace('K', '').astype(float) # - # ## Removing Duplicates # Duplicates are not present in this dataset. Each row represents one individual player in the game. # ## Missing Values # Determine which columns have missing values and how many each column has. print(data.isnull().sum()) # Upon review of the missing value data above and the dataset, there are 48 players who are missing the same majority of player attribute data; thus, their player profiles cannot be made. To deal with this portion of missing data, these players will be removed from the dataset. # remove the 48 players of interest from the dataset data_clean = data_clean[data_clean['Preferred Foot'].notnull()] # Players missing a value for Club will be assigned a string value 'No Club' to replace the null value. 
Players without a position will be assigned 'NP' for No Position to replace the null value. # + # assign 'No Club' to players with a nan value for club data_clean['Club'] = data_clean['Club'].fillna('No Club') # assign 'NP' to players without a position given data_clean['Position'] = data_clean['Position'].fillna('NP') # - # Position Rating expressions are evaluated to consider a player's real-world in-season improvements the game factors in and reflect the latest FIFA 19 player data. Goalkeepers (GK) and players without a position (NP) have null position ratings and these values are replaced with 0. # + # list of positions that are rated positions = ['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB'] for pos in positions: data_clean[pos] = data_clean[pos].str.split('+').str[0].astype(float) + data_clean[pos].str.split('+').str[1].astype(float) data_clean[positions] = data_clean[positions].fillna(0) # - # ## Outliers # Outliers are defined as data that are beyond +/- 3 standard deviations for its respective column. # Goalkeeper (GK)-specific attribute columns (i.e. GKDiving, GKHandling, GKKicking, GKPositioning, GKReflexes) are kept in the dataset with outliers because the majority of players in the dataset do not play the position of GK. These data are likely to be skewed and not be normally distributed rendering the outlier criteria used inadmissable. 
# + # function to detect outlier and count number of outliers per numerical column def numOutliers(col_name, df): mean_col = np.mean(df[col_name]) std_col = np.std(df[col_name]) outliers = df[(df[col_name] < mean_col - 3*std_col) | (df[col_name] > mean_col + 3*std_col)] return outliers.shape[0] cols = ['Age', 'Overall', 'Potential', 'Value', 'Wage', 'Special', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Crossing', 'Finishing', 'HeadingAccuracy', 'ShortPassing', 'Volleys', 'Dribbling', 'Curve', 'FKAccuracy', 'LongPassing', 'BallControl', 'Acceleration', 'SprintSpeed', 'Agility', 'Reactions', 'Balance', 'ShotPower', 'Jumping', 'Stamina', 'Strength', 'LongShots', 'Aggression', 'Interceptions', 'Positioning', 'Vision', 'Penalties', 'Composure', 'Marking', 'StandingTackle', 'SlidingTackle', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes'] n_outliers = [] for col in cols: n_outliers.append(numOutliers(col, data_clean)) outliers = np.column_stack((cols, n_outliers)) outliers = outliers[0:-5] print(outliers) # - # ## Assigning a New Value for Outliers # To retain as much data as possible and because outliers were scattered across various players in different columns (i.e. all of Player X's attribute values were not outliers), all player data in the dataset at this point were retained and their outlier values were either assigned the +3 standard deviation value -3 standard deviation value depending on whether the outlier value is greater or less than the column mean respectively. This is to preserve the interval nature of the data; for example, "Player X's value is greater than the mean". 
When the data is originally expressed as an integer, which is all the data in the columns that were checked for outliers except for Value and Wage, the new assigned value for the outlier is the nearest integer that does not go beyond +/- 3 standard deviations. # + # function to assign a new value to outliers import math def changeOutliers(outlier, df): if outlier[1] != 0: col_name = outlier[0] mean_col = np.mean(df[col_name]) std_col = np.std(df[col_name]) low_end = mean_col - 3*std_col high_end = mean_col + 3*std_col df_outliers = df[(df[col_name] < low_end) | (df[col_name] > high_end)] if col_name == 'Value' or col_name == 'Wage': for idx, row in df_outliers.iterrows(): if row[col_name] < mean_col: df.at[row['Index'], col_name] = low_end else: df.at[row['Index'], col_name] = high_end else: for idx, row in df_outliers.iterrows(): if row[col_name] < mean_col: df.at[row['Index'], col_name] = math.ceil(low_end) else: df.at[row['Index'], col_name] = math.floor(high_end) return df for col in outliers: changeOutliers(col, data_clean) data_clean # - # ## Write the Cleaned Dataset to File # + # remove index column data_clean = data_clean.drop('Index', axis=1) # write to file data_clean.to_csv('fifa19data_clean_no_outliers.csv', index=False) # -
Code/Data Prep/Week 3 - Data Wrangling and Cleaning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Convolutional Neural Networks # # - Special type of neural network to model data with a known **grid-like topology**. # # # - *What kind of data is modeled with a CNN?* # - 1-D grid of scalars for Time series data # - 2-D grid of pixels for Image data # # # - _Difference between CNN and DNN?_ # - CNNs apply convolution in place of matrix multiplication in at least 1 layer # # ### Convolution Operation # # In the context of Neural Networks, it is an operation between 2 Tensors: Input $f(x)$ and a Kernel $g(x)$. If both functions are 1-Dimensional Data (Like Time series data), then their convolution is: # # $$ # (f*g)(t) = \int_{-\infty}^\infty f(\tau).g(t-\tau) d\tau # $$ # # Represents percentage of area of $g$ that overlaps $f$ at time $\tau$ over all time $\tau$. Since $\tau < 0$ is meaningless and $\tau > t$ represents the value of a function in the future (which we don't know), the integral above is written as below: # # $$ # (f*g)(t) = \int_{0}^t f(\tau).g(t-\tau) d\tau \tag{1} # $$ # # This corresponds to a single entry in the 1-D convolved Tensor $f*g$ (the $t^{th}$ entry). To compute the complete convolved tensor, we need to iterate t over all possible values. # # In Machine Learning, convolution is implemented as **cross-correlation** (convolution without kernel flip). # # $$ # (f\star g)(t) = \bar f(-t)*g(t) # $$ # # Where $\bar f$ is the complex conjugate of $f$. This simplifies to the following. # # $$ # (f \star g)(t) = \int_{-\infty}^\infty \bar f(-\tau).g(t-\tau) d\tau # $$ # # Let $\tau' = -\tau$. So $d\tau' = -d\tau$. 
# # $$ # (f \star g)(t) = \int_{-\infty}^\infty \bar f(\tau').g(t+\tau') (-d\tau') # $$ # # Flip the Integral # # $$ # (f \star g)(t) = \int_{\infty}^{-\infty} \bar f(\tau).g(t+\tau) d\tau \tag{2} # $$ # # In practice, we may have multi dimentional input Tensors that require a multi Dimensional kernel tensor $g$. Consider an Image input. Convolution is defined as: # # $$ # \begin{aligned} # (I*h)(x, y) &= \int_0^x \int_0^y I(i, j).h(x-i, y-j) di dj \\ # &= \int_0^x \int_0^y I(x-i, y-j).h(i, j) di dj # \end{aligned} # \tag{3} # $$ # # There are _usually_ less possible values for $x, y$ in a kerel than in an Image. Hence we use the latter form. The result will be a scalar value of the convolution. We need to repeat the process for every point $(x, y)$ for which a convolution exists on the image. store each value in the convolved matrix $I*h$. This output is sometimes called a **feature map**. # # # ### Discrete convolution # # Discrete convolution (in 1 Dimensional or univariate) can be done by converting one matrix (either input or impulse) into a **Toeplitz Matrix**. In this matrix, each row entry is displaced by 1 column. # # Consider the input : # $$ # # X = # \begin{bmatrix} # x_0 & x_1 & x_2 & . & . & . & x_n # \end{bmatrix} # # X_{Toeplitz} = # \begin{bmatrix} # x_0 & x_1 & x_2 & ... & x_n & 0 & 0 & 0 & ... & 0\\ # 0 & x_0 & x_1 & x_2 & ... & x_n & 0 & 0 & ... & 0\\ # 0 & 0 & x_0 & x_1 & x_2 & ... & x_n & 0 & ... & 0\\ # ... \\ # ... \\ # ... \\ # 0 & 0 & 0 & ... & x_0 & x_1 & x_2 & 0 & ... & x_n\\ # \end{bmatrix} # # h = # \begin{bmatrix} # h_0 \\ # h_1 \\ # h_2 \\ # . \\ # . \\ # . \\ # h_m # \end{bmatrix} # # y = X_T*h = X_T.h # # $$ # # Taking the toeplitz of the input is shifting the input over time (one time step per row).The matrix above moves the input $X$ along $h$ # # ### Why convolution? 
# # Three reasons: # - Sparse Interactions # - Parameter Sharing # - Equivariant representations # # ### Sparse Interactions # # - In a DNN, every neuron in one layer is connected to every other in the next. In a CNN, each neuron is connected to only _k_ neurons in the next (where k is the size of the kernel in 1-D). # - **Required Parameters Estimated reduces**: In a DNN with $m$ input neurons and $n$ output neurons, number of parameters = $O(m \times n)$. CNN number of parameters = $O(k \times n)$ # - Viewed from below, a neuron in a DNN affects every neuron in the next layer. A neuron in a CNN only affects _k_ neurons in the next layer. # - Viewed from above the **receptive field** of a neuron in a DNN includes all neurons in the previous layer. The receptive field of a neuron in a CNN only consists of k neurons. # - **Indirect interactions exist**: Consider a kernel of size 3. the receptive field of a neuron in layer 4 inclues 3 neurons in layer 3. It also includes the receptive field of these 3 neurons and so on. Hence Indirect interactions exist despite sparse connectivity. # ### Parameter Sharing # # - In a DNN, the parameters learned (weights and biases) are the only used once for the entries between a specific pair of layers. # - In a CNN, the same kernel is used through every point in the input (except for some boundary points) # - In DNN, Space = $O(m \times n)$ Time = $O(m \times n)$ # - In CNN, Space = $O(k)$ Time = $O(k \times n)$ # - Hence parameter sharing decreases storage. # # ### Equivariant # # - Assume we perform a linear transformation on the input. The output of convolution will not be affected whether performed before or after the transformation. It will be modified by an amount equal to the effect of translation on the input. # - E.g. Convolution is equivariant with an operation that assigns every pixel to it's left in 2-D (or translation of a signal linearly in 1-D time series) # - E.g. 
2 Convolution is _not_ equivariant with operations like image rotation. # ## Pooling # # 1. **Parallel Convolutions **: Perform several convolutions in parallel. The output is a set of linear activations. But we want non-linear activations as the former doesn't actually learn much. (It's as good as logistic regression for classification). https://www.coursera.org/learn/neural-networks-deep-learning/lecture/OASKH/why-do-you-need-non-linear-activation-functions # # 2. **Detector Stage**: Perform a set of non linear activations. High values are those images which are ismilar # 3. **pooling**: # # Pooling is a summary statistic. It picks a pixel to represent it's immidiate neighbors. Hence, the output of pooling is **invariant** with respect to small linear transaltions. So changing adding small translation to an image doesn't affect output of pooling. E.g. Consider the problem of digit recognition. We have a sample image (of say the number 5). # # Used in case of object detection, rather than accurately pin point the location of object in an image. E.g. In face detection, we are only concerned about the presence of an oval with 2 eyes, a nose and mouth. We are _not_ concerned with the pixel positions of these parts in the object. # Try Time series analysis: # - Kaggle Stock data: https://www.kaggle.com/camnugent/sandp500/data # - Time series analysis blog: https://codeburst.io/neural-networks-for-algorithmic-trading-volatility-forecasting-and-custom-loss-functions-c030e316ea7e # - Classification: https://github.com/Rachnog/Deep-Trading/blob/master/multivariate/multivariate.py#L77 # - Regression: https://github.com/Rachnog/Deep-Trading/blob/master/volatility/volatility.py #
notebooks/CNN Notes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Active Learning # > A programming introduction to query by committee strategy for active learning # # - toc: true # - badges: true # - comments: true # - author: <NAME> # - categories: [ML] # We all get it - AI is the new electricity. Deep neural nets are everywhere around us. But you know what, getting labelled training data can still be a big issue in many domains. This is where active learning comes in - given that we only have a small amount of labelled data, do we randomly get labels for other samples, or can we create a `smarter` strategy for the same? Active learning deals with the latter. # # Various strategies for active learning have been proposed in the past. In this post, I'll work out a trivial example of what is called query by committee. The key idea is that we create a committee of learners and choose to acquire labels for the unlabelled points for which there is maximum disaggrement amongst the committee. # # I'd recommend the new readers to go through [this](http://burrsettles.com/pub/settles.activelearning.pdf) survey. # # In this particular post, I'd be looking at active learning via query by committee, where the committee members are trained on different subsets of the train data. In a future post, I'll write about active learning via query by committee, where the committee members are trained on the same data, but with different parameters. 
# #### Standard imports

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
np.random.seed(0)
# %matplotlib inline

# #### Creating dataset

# Synthetic linear data: Y = 10X + 4 plus Gaussian noise (sd = 400).
X = np.arange(1, 1001, 1)
Y = 10*X + 4 + 400* np.random.randn(1000, )

plt.scatter(X, Y, s=0.1)
plt.xlabel("X")
plt.ylabel("Y")

# #### Learning a linear regression model on the entire data

from sklearn.linear_model import LinearRegression

clf = LinearRegression()
clf.fit(X.reshape(-1,1), Y)

clf.intercept_

clf.coef_

# #### Visualising the fit

plt.scatter(X, Y, s=0.1)
plt.xlabel("X")
plt.ylabel("Y")
plt.plot(X, clf.coef_[0]*X + clf.intercept_, color='k', label='Best fit on all data')
plt.legend()
plt.text(500, clf.coef_[0]*500 + clf.intercept_ +4000, "Y = {0:0.2f} X + {1:0.2f}".format(clf.coef_[0], clf.intercept_) )

# #### Creating the initial train set, the test set and the pool

from sklearn.model_selection import train_test_split

# 50/50 split into (train+pool) and test; then all but 5 of the 500
# train+pool points go to the unlabelled pool (initial train set is tiny).
train_pool_X, test_X, train_pool_Y, test_Y = train_test_split(X, Y, test_size = 0.5)

train_X, pool_X, train_Y, pool_Y = train_test_split(train_pool_X, train_pool_Y, test_size=495)

plt.scatter(train_X, train_Y)

# #### Creating a committee each learnt on different subset of the data

committee_size = 5

# +
# train_X_com / train_Y_com / models_com are nested dicts keyed as
# [iteration][committee_member]; iteration 0 is the initial state.
train_X_com = {0:{}}
train_Y_com = {0:{}}
models_com = {0:{}}

iteration = 0
for cur_committee in range(committee_size):
    # Each member trains on a different random half of the current train set
    # (random_state differs per member), which is what creates disagreement.
    train_X_com[iteration][cur_committee], _, train_Y_com[iteration][cur_committee], _ = train_test_split(train_X, train_Y, train_size=0.5, random_state=cur_committee)
    models_com[iteration][cur_committee] = LinearRegression()
    models_com[iteration][cur_committee].fit(train_X_com[iteration][cur_committee].reshape(-1,1), train_Y_com[iteration][cur_committee])
# -

# #### Plotting the fit of the committee on the entire dataset

plt.scatter(X, Y, s=0.2)
for cur_committee in range(committee_size):
    plt.plot(X, models_com[0][cur_committee].coef_[0]*X + models_com[0][cur_committee].intercept_, label='Model {0}\nY = {1:0.2f} X + {2:0.2f}'.format(cur_committee, models_com[0][cur_committee].coef_[0], models_com[0][cur_committee].intercept_))
plt.legend()

# #### Evaluate the performance on the test set

# Committee prediction = mean of the members' predictions; error is MAE.
estimations_com = {0:{}}
for cur_committee in range(committee_size):
    estimations_com[0][cur_committee] = models_com[0][cur_committee].predict(test_X.reshape(-1, 1))

test_mae_error = {0:(pd.DataFrame(estimations_com[0]).mean(axis=1) - test_Y).abs().mean()}

# The MAE on the test set is:

test_mae_error[0]

# #### Active learning procedure

# +
num_iterations = 20
points_added_x=[]
points_added_y=[]
print("Iteration, Cost\n")
print("-"*40)
for iteration in range(1, num_iterations):
    # For each committee: making predictions on the pool set based on model learnt in the respective train set
    estimations_pool = {cur_committee: models_com[iteration-1][cur_committee].predict(pool_X.reshape(-1, 1)) for cur_committee in range(committee_size)}
    # Finding points from the pool with highest disagreement among the committee - highest standard deviation
    in_var = pd.DataFrame(estimations_pool).std(axis=1).argmax()
    to_add_x = pool_X[in_var]
    to_add_y = pool_Y[in_var]
    points_added_x.append(to_add_x)
    points_added_y.append(to_add_y)
    # For each committee - Adding the point where the committe most disagrees
    for com in range(committee_size):
        if iteration not in train_X_com:
            train_X_com[iteration] = {}
            train_Y_com[iteration] = {}
            models_com[iteration] = {}
        train_X_com[iteration][com] = np.append(train_X_com[iteration-1][com], to_add_x)
        train_Y_com[iteration][com] = np.append(train_Y_com[iteration-1][com], to_add_y)
    # Deleting the point from the pool
    pool_X = np.delete(pool_X, in_var)
    pool_Y = np.delete(pool_Y, in_var)
    # Training on the new set for each committee
    for cur_committee in range(committee_size):
        models_com[iteration][cur_committee] = LinearRegression()
        models_com[iteration][cur_committee].fit(train_X_com[iteration][cur_committee].reshape(-1,1), train_Y_com[iteration][cur_committee])
    estimations_com[iteration] = {}
    for cur_committee in range(committee_size):
        estimations_com[iteration][cur_committee] = models_com[iteration][cur_committee].predict(test_X.reshape(-1, 1))
    test_mae_error[iteration]=(pd.DataFrame(estimations_com[iteration]).mean(axis=1) - test_Y).abs().mean()
    print(iteration, (test_mae_error[iteration]))
# -

pd.Series(test_mae_error).plot(style='ko-')
plt.xlim((-0.5, num_iterations+0.5))
plt.ylabel("MAE on test set")
plt.xlabel("# Points Queried")

# As expected, the error goes down as we increase the number of points queried

# +
fig, ax = plt.subplots()
import os
from matplotlib.animation import FuncAnimation
# Hard-coded local ffmpeg path — only valid on the author's machine.
plt.rcParams['animation.ffmpeg_path'] = os.path.expanduser('/Users/nipun/ffmpeg')

def update(iteration):
    # Redraw one animation frame: data cloud, each member's fitted line, and
    # the queried point highlighted in red.
    # NOTE(review): frame i shows models_com[i] together with
    # points_added_x[i], which is the point queried going *into* iteration
    # i+1 — looks intentional ("next query"), but confirm.
    ax.cla()
    ax.scatter(X, Y, s=0.2)
    ax.set_title("Iteration: {} \n MAE = {:0.2f}".format(iteration, test_mae_error[iteration]))
    for cur_committee in range(committee_size):
        ax.plot(X, models_com[iteration][cur_committee].coef_[0]*X + models_com[iteration][cur_committee].intercept_, label='Model {0}\nY = {1:0.2f} X + {2:0.2f}'.format(cur_committee, models_com[iteration][cur_committee].coef_[0], models_com[iteration][cur_committee].intercept_))
    ax.scatter(points_added_x[iteration], points_added_y[iteration],s=100, color='red')
    ax.legend()
    fig.tight_layout()

anim = FuncAnimation(fig, update, frames=np.arange(0, num_iterations-1, 1), interval=1000)
plt.close()
# -

from IPython.display import HTML
HTML(anim.to_html5_video())

# From the animation, we can see that how adding a new point to the train set (shown in red) reduces the variation in prediction amongst the different committee members.
_notebooks/2018-06-16-active-committee.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!

# #### Simple Dropdown Widget

# +
import pandas as pd
import plotly.plotly as py
from ipywidgets import widgets
from IPython.display import display, clear_output, Image
import plotly.graph_objs as go
# NOTE(review): plotly.widgets.GraphWidget is a legacy (plotly < 3) API;
# this notebook will not run against modern plotly.
from plotly.widgets import GraphWidget

# define our widgets
g = GraphWidget('https://plot.ly/~kevintest/1149/')
w = widgets.Dropdown(
    options=['red', 'blue', 'green'],
    value='blue',
    description='Colour:',
)

# generate a function to handle changes in the widget
def update_on_change(change):
    # Recolour the plot's markers to the newly selected dropdown value.
    g.restyle({'marker.color': change['new']})

# set a listener for changes to the dropdown widget
# NOTE(review): modern ipywidgets fire on names='value';
# "selected_label" appears to be a legacy trait name — confirm.
w.observe(update_on_change, names="selected_label")

display(w)
display(g)

# + language="html"
# <img src="https://cloud.githubusercontent.com/assets/12302455/16567268/929e1f46-41ea-11e6-8163-096e5a4f4502.gif">
# -

# #### Dropdown Widget with Real Data
# We will be using the `pandas` data package in order to filter our dataset.

# +
import pandas as pd
import plotly.plotly as py
from ipywidgets import widgets
from IPython.display import display
import plotly.graph_objs as go
from plotly.widgets import GraphWidget

# import data
complaints = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gantt_example.csv')

# we will define a function that will handle the input from the dropdown widget
def update_plot(complaint):
    # Filter rows matching the selected Resource and redraw per-borough counts.
    #is_noise = complaints['Complaint Type'] == complaint['new']
    is_noise = complaints['Resource'] == complaint['new']
    temp = complaints[is_noise]
    data = temp.groupby(complaints['Borough'])['Resource'].count()
    x = []
    y = []
    for i in range(len(data)):
        x.append(data.index[i])
        y.append(data[i])
    graph.restyle({
        'x': [x],
        'y': [y],
    })
    graph.relayout({'title': 'Number of {} Complaints in New York by Borough'.format(complaint['new'])})

w = widgets.Dropdown(
    options= list(complaints['Resource'].unique()),
    value='A',
    description='Resource',
)
graph = GraphWidget('https://plot.ly/~kevintest/1176/')

# observe will set a listener for activity on our dropdown menu
w.observe(update_plot, names="selected_label")
display(w)
display(graph)

# +
# widgets.Dropdown.observe?

# + language="html"
# <img src="https://cloud.githubusercontent.com/assets/12302455/16567269/929f60e0-41ea-11e6-9bb4-2d3acbdc98f6.gif">
# -

# #### Reference

help(GraphWidget)

# +
from IPython.display import display, HTML

display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'dropdown.ipynb', 'python/dropdown-widget/', 'IPython Widgets',
    'Interacting with Plotly Charts using Dropdowns',
    title = 'Dropdown Widget with Plotly',
    name = 'Dropdown Widget with Plotly',
    has_thumbnail='true', thumbnail='thumbnail/dropdown-widget.jpg',
    language='python', page_type='example_index',
    display_as='chart_events', order=21,
    ipynb= '~notebook_demo/86')
# -
_posts/python/javascript-controls/ipython-widgets/dropdown.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a per-race driver dataset (qualifying position, grid, standings) and
# predict finishing position with simple classifiers.

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

races_df = pd.read_csv("races.csv", header=[0], encoding="ISO-8859-1")
qualifying_df = pd.read_csv("qualifying.csv", header=[0], encoding="ISO-8859-1")
driver_standings_df = pd.read_csv("driver_standings.csv", header=[0], encoding="ISO-8859-1")
constructor_standings_df = pd.read_csv("constructor_standings.csv", header=[0], encoding="ISO-8859-1")
driver_results_df = pd.read_csv("results.csv", header=[0], encoding="ISO-8859-1")
constructor_results_df = pd.read_csv("constructor_results.csv", header=[0], encoding="ISO-8859-1")

races_list = races_df.loc[:, ['RaceId', 'Year', 'Round', 'CircuitId']]
# REFERENCES: https://stackoverflow.com/questions/15891038/change-data-type-of-columns-in-pandas
races_list = races_list.apply(pd.to_numeric).sort_values(by=['Year', 'Round'])
race_filter = races_list['Year'].between(1950,2019)
races_list = races_list[race_filter]
print("RACES\n","Shape:", races_list.shape,"and Unique RaceIds:", races_list.RaceId.unique().shape)

races_list.Year.unique()

required_raceIds = races_list.RaceId.unique()

races_list.head()

qualifying_data = qualifying_df.loc[:, ['RaceId', 'DriverId', 'ConstructorId', 'QualifyingPosition']]
qualifying_data = qualifying_data.apply(pd.to_numeric)
qualifying_data.RaceId.unique().shape

# Select RaceIds for Races from 2008 to 2019
# REFERENCES: https://stackoverflow.com/questions/17071871/how-to-select-rows-from-a-dataframe-based-on-column-values
qualifying_dataset = qualifying_data.loc[qualifying_data['RaceId'].isin(required_raceIds)]
print("QUALIFYING\n","Shape:", qualifying_dataset.shape,"and Unique RaceIds:", qualifying_dataset.RaceId.unique().shape)
qualifying_dataset = qualifying_dataset.sort_values(by=['RaceId', 'DriverId'])
qualifying_dataset.to_csv("QualfyingFilteredNew.csv")

qualifying_dataset.RaceId.unique().shape

driver_standings_df['DriverPoints'] = driver_standings_df['DriverPoints'].astype(int)
driver_standings_df.rename(columns={'DriverPoints': 'TotalDriverPoints'}, inplace = True)
driver_standings_data = driver_standings_df.loc[:, ['RaceId', 'DriverId', 'TotalDriverPoints']]
driver_standings_data.head()

driver_results_data = driver_results_df.loc[:, ['RaceId', 'DriverId', 'ConstructorId', 'GridNumber', 'DriverPosition']]
driver_results_data.head()

driver_results_dataset = driver_results_data.loc[driver_results_data['RaceId'].isin(required_raceIds)]
print("DRIVER RESULTS\n","Shape:", driver_results_dataset.shape,"and Unique RaceIds:", driver_results_dataset.RaceId.unique().shape)
#driver_results_dataset = driver_results_dataset.sort_values(by=['RaceId', 'DriverId'])
driver_results_dataset.to_csv("driverResultsNew.csv")
driver_results_dataset.head()

# Join race results with qualifying positions per (race, driver, constructor).
result = pd.merge(driver_results_dataset, qualifying_dataset, on=['RaceId', 'DriverId', 'ConstructorId'], how='left')
result.to_csv("Resultant Data New.csv")
result.head()

final_dataset = pd.read_csv('Resultant Data New.csv')
final_dataset.head()

new_finalSet = pd.merge(final_dataset, races_list, on = ['RaceId'], how = 'left')
new_finalSet = new_finalSet.iloc[:, [1, 2, 3, 4, 5, 6, 9]]
print("Shape:",new_finalSet.shape)
new_finalSet.head()

# REFERENCES: https://stackoverflow.com/questions/13148429/how-to-change-the-order-of-dataframe-columns
new_set = pd.merge(new_finalSet, driver_standings_data, on = ['RaceId', 'DriverId'], how = 'left')
new_set.fillna(0, inplace = True)
new_set['TotalDriverPoints'] = new_set['TotalDriverPoints'].astype(int)
# Move the target column (DriverPosition) to the end.
new_order = [0,1,2, 3, 5, 6, 7, 4]
new_set = new_set[new_set.columns[new_order]]
new_set.head()

dataset_with_points = new_set
dataset_with_points.to_csv('DataSetWithPointsNew.csv')
dataset_with_points = dataset_with_points.iloc[:, 1:]
dataset_with_points.head()

# '\N' marks "did not finish / not classified"; map it to position 22.
dataset_with_points['DriverPosition'] = dataset_with_points['DriverPosition'].replace(['\\N'], '22')
# BUG FIX: the int conversion originally ran *after* filterData was sliced,
# so the labels actually used for training stayed strings. Convert first so
# the sliced frame inherits the numeric dtype.
dataset_with_points['DriverPosition'] = dataset_with_points['DriverPosition'].astype(int)
filterData = dataset_with_points[dataset_with_points['DriverPosition'] != '\\N']
filterData.shape

# REFERENCES: https://www.pluralsight.com/guides/importing-and-splitting-data-into-dependent-and-independent-features-for-ml
X = filterData.iloc[:, :-1].values
Y = filterData.iloc[:, -1].values
X_train, X_test, Y_Train, Y_Test = train_test_split(X, Y, test_size=0.1)
print("Train Data:",X_train.shape,"and Test Data:",X_test.shape)

# +
from sklearn.linear_model import LogisticRegression
from sklearn import metrics

lr_model = LogisticRegression(solver='lbfgs', max_iter=10000)
model = lr_model.fit(X_train, Y_Train)
# BUG FIX: predictions were made on X_train but compared against Y_Test
# (mismatched samples, meaningless metric). Evaluate on the test set.
# (An unused KFold instance was also removed — no cross-validation was run.)
Y_pred = lr_model.predict(X_test)

count_misclassified = (Y_Test != Y_pred).sum()
print('Misclassified samples: {}'.format(count_misclassified))
accuracy = metrics.accuracy_score(Y_Test, Y_pred)
print('Accuracy: {:.2f}'.format(accuracy))

# +
from sklearn.tree import DecisionTreeClassifier

dtree_model = DecisionTreeClassifier(max_depth = 8).fit(X_train, Y_Train)
dtree_predictions = dtree_model.predict(X_test)

count_misclassified = (Y_Test != dtree_predictions).sum()
print('Misclassified samples: {}'.format(count_misclassified))
accuracy = metrics.accuracy_score(Y_Test, dtree_predictions)
print('Accuracy: {:.2f}'.format(accuracy))
# -

# ##### from sklearn.neighbors import KNeighborsClassifier
# knn = KNeighborsClassifier(n_neighbors = 20).fit(X_train, Y_Train)
#
# # accuracy on X_test
# accuracy = knn.score(X_test, Y_Test)
# print (accuracy)
#
All Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # + library(tidyverse) library(ggupset) rslt <- '../results/snp_models/' # + tags=[] read_models <- function(x, method) { read_delim(paste0(rslt, x), delim = ' ', col_types = 'cc') %>% mutate(uniq_snp_id = cbind(SNP1, SNP2) %>% apply(1, sort) %>% apply(2, paste, collapse = "_"), method = method) %>% select(method, uniq_snp_id) } scientific_10 <- function(x) { parse(text=gsub("e", " %*% 10^", scales::scientific_format()(x))) } models_overlap <- bind_rows(read_models('physical.txt', 'Positional'), read_models('eqtl.txt', 'eQTL'), read_models('chromatin.txt', 'Chromatin'), read_models('eqlt_chrom.txt', 'eQTL + Chromatin'), read_models('eqtl_chrom_phys.txt', 'Positional + eQTL + Chromatin')) %>% group_by(uniq_snp_id) %>% summarize(methods = list(method)) %>% ggplot(aes(x = methods)) + geom_bar() + labs(x = 'Analysis', y = '# SNP models') + scale_x_upset(sets = c('Positional', 'eQTL', 'Chromatin', 'eQTL + Chromatin', 'Positional + eQTL + Chromatin')) + scale_y_log10(label=scientific_10) # - ggsave(models_overlap, filename = 'fig/models_comparison.pdf', width=10, height=8, bg = "transparent")
doc/snp_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # !wget https://storage.googleapis.com/xlnet/released_models/cased_L-12_H-768_A-12.zip
# # !unzip cased_L-12_H-768_A-12.zip

# +
import sentencepiece as spm
from prepro_utils import preprocess_text, encode_ids

# SentencePiece model shipped with the pretrained XLNet checkpoint.
sp_model = spm.SentencePieceProcessor()
sp_model.Load('xlnet_cased_L-12_H-768_A-12/spiece.model')

# +
from prepro_utils import preprocess_text, encode_ids

def tokenize_fn(text, sp_model):
    # Normalise the raw text (case preserved) and map it to sentencepiece ids.
    text = preprocess_text(text, lower = False)
    return encode_ids(sp_model, text)

# +
# Segment ids used by the XLNet input pipeline.
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4

# Special-token vocabulary ids (fixed by the pretrained model).
special_symbols = {
    "<unk>" : 0,
    "<s>" : 1,
    "</s>" : 2,
    "<cls>" : 3,
    "<sep>" : 4,
    "<pad>" : 5,
    "<mask>" : 6,
    "<eod>" : 7,
    "<eop>" : 8,
}

VOCAB_SIZE = 32000
UNK_ID = special_symbols["<unk>"]
CLS_ID = special_symbols["<cls>"]
SEP_ID = special_symbols["<sep>"]
MASK_ID = special_symbols["<mask>"]
EOD_ID = special_symbols["<eod>"]

# +
import collections
import re

def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Map checkpoint variable names onto the trainable variables of the
    current graph, so the pretrained weights can be restored.

    Returns (assignment_map, initialized_variable_names)."""
    assignment_map = {}
    initialized_variable_names = {}
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        # Strip the trailing ":0" device suffix from variable names.
        m = re.match('^(.*):\\d+$', name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        # Only restore checkpoint variables that exist in this graph.
        if name not in name_to_variable:
            continue
        assignment_map[name] = name_to_variable[name]
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ':0'] = 1
    return (assignment_map, initialized_variable_names)

# +
import xlnet as xlnet_lib
import tensorflow as tf
import numpy as np

class Model:
    """Thin TF1 wrapper around a pretrained XLNet: builds the graph once,
    then exposes `vectorize` (pooled sentence embeddings) and `attention`
    (per-token attention weights)."""

    def __init__(self, xlnet_config, tokenizer, checkpoint, pool_mode = 'last'):
        # Inference-only run config (no dropout, no TPU).
        kwargs = dict(
            is_training = True,
            use_tpu = False,
            use_bfloat16 = False,
            dropout = 0.0,
            dropatt = 0.0,
            init = 'normal',
            init_range = 0.1,
            init_std = 0.05,
            clamp_len = -1,
        )
        xlnet_parameters = xlnet_lib.RunConfig(**kwargs)
        self._tokenizer = tokenizer
        _graph = tf.Graph()
        with _graph.as_default():
            # Placeholders are batch-major; XLNet expects time-major, hence
            # the transposes below.
            self.X = tf.placeholder(tf.int32, [None, None])
            self.segment_ids = tf.placeholder(tf.int32, [None, None])
            self.input_masks = tf.placeholder(tf.float32, [None, None])
            xlnet_model = xlnet_lib.XLNetModel(
                xlnet_config = xlnet_config,
                run_config = xlnet_parameters,
                input_ids = tf.transpose(self.X, [1, 0]),
                seg_ids = tf.transpose(self.segment_ids, [1, 0]),
                input_mask = tf.transpose(self.input_masks, [1, 0]),
            )
            self.logits = xlnet_model.get_pooled_out(pool_mode, True)
            self._sess = tf.InteractiveSession()
            self._sess.run(tf.global_variables_initializer())
            tvars = tf.trainable_variables()
            assignment_map, _ = get_assignment_map_from_checkpoint(
                tvars, checkpoint
            )
            self._saver = tf.train.Saver(var_list = assignment_map)
            # Collect every relative-attention softmax node (one per layer)
            # so attention maps can be fetched after the forward pass.
            attentions = [
                n.name
                for n in tf.get_default_graph().as_graph_def().node
                if 'rel_attn/Softmax' in n.name
            ]
            g = tf.get_default_graph()
            self.attention_nodes = [
                g.get_tensor_by_name('%s:0' % (a)) for a in attentions
            ]

    def vectorize(self, strings):
        """
        Vectorize string inputs using the XLNet pooled output.

        Parameters
        ----------
        strings : str / list of str

        Returns
        -------
        array: vectorized strings
        """
        if isinstance(strings, list):
            if not isinstance(strings[0], str):
                raise ValueError('input must be a list of strings or a string')
        else:
            if not isinstance(strings, str):
                raise ValueError('input must be a list of strings or a string')
        if isinstance(strings, str):
            strings = [strings]
        input_ids, input_masks, segment_ids, _ = xlnet_tokenization(
            self._tokenizer, strings
        )
        return self._sess.run(
            self.logits,
            feed_dict = {
                self.X: input_ids,
                self.segment_ids: segment_ids,
                self.input_masks: input_masks,
            },
        )

    def attention(self, strings, method = 'last', **kwargs):
        """
        Get attention string inputs from xlnet attention.

        Parameters
        ----------
        strings : str / list of str
        method : str, optional (default='last')
            Attention layer supported. Allowed values:

            * ``'last'`` - attention from last layer.
            * ``'first'`` - attention from first layer.
            * ``'mean'`` - average attentions from all layers.

        Returns
        -------
        array: attention
        """
        if isinstance(strings, list):
            if not isinstance(strings[0], str):
                raise ValueError('input must be a list of strings or a string')
        else:
            if not isinstance(strings, str):
                raise ValueError('input must be a list of strings or a string')
        if isinstance(strings, str):
            strings = [strings]
        method = method.lower()
        if method not in ['last', 'first', 'mean']:
            raise Exception(
                "method not supported, only support ['last', 'first', 'mean']"
            )
        input_ids, input_masks, segment_ids, s_tokens = xlnet_tokenization(
            self._tokenizer, strings
        )
        maxlen = max([len(s) for s in s_tokens])
        # NOTE(review): s_tokens are padded *post* here while the id inputs
        # are padded *pre* in xlnet_tokenization — for batches of unequal
        # lengths the token/weight zip below may misalign. Confirm with a
        # multi-sentence batch.
        s_tokens = padding_sequence(s_tokens, maxlen, pad_int = '<cls>')
        attentions = self._sess.run(
            self.attention_nodes,
            feed_dict = {
                self.X: input_ids,
                self.segment_ids: segment_ids,
                self.input_masks: input_masks,
            },
        )
        # Keep only the attention row of the first (time-major) position and
        # reorder to batch-major before averaging over heads.
        if method == 'first':
            cls_attn = np.transpose(attentions[0][:, 0], (1, 0, 2))
        if method == 'last':
            cls_attn = np.transpose(attentions[-1][:, 0], (1, 0, 2))
        if method == 'mean':
            cls_attn = np.transpose(
                np.mean(attentions, axis = 0).mean(axis = 1), (1, 0, 2)
            )
        cls_attn = np.mean(cls_attn, axis = 1)
        # Normalise so each sentence's weights sum to 1.
        total_weights = np.sum(cls_attn, axis = -1, keepdims = True)
        attn = cls_attn / total_weights
        output = []
        for i in range(attn.shape[0]):
            output.append(
                merge_sentencepiece_tokens(list(zip(s_tokens[i], attn[i])))
            )
        return output

# -

def merge_sentencepiece_tokens(paired_tokens, weighted = True):
    """Merge (sentencepiece, weight) pairs back into whole words.

    Pieces not starting with the word marker '▁' are glued onto the previous
    word and their weights averaged; special tokens are dropped. If
    ``weighted``, the remaining weights are renormalised to sum to 1."""
    new_paired_tokens = []
    n_tokens = len(paired_tokens)
    rejected = ['<cls>', '<sep>']
    i = 0
    while i < n_tokens:
        current_token, current_weight = paired_tokens[i]
        if not current_token.startswith('▁') and current_token not in rejected:
            # Continuation piece: pop the word built so far and extend it.
            previous_token, previous_weight = new_paired_tokens.pop()
            merged_token = previous_token
            merged_weight = [previous_weight]
            # NOTE(review): this inner loop indexes paired_tokens[i] without
            # re-checking i < n_tokens; safe only because xlnet_tokenization
            # always ends sequences with '<cls>' (a rejected token). Would
            # IndexError on a sequence ending in a continuation piece.
            while (
                not current_token.startswith('▁')
                and current_token not in rejected
            ):
                merged_token = merged_token + current_token.replace('▁', '')
                merged_weight.append(current_weight)
                i = i + 1
                current_token, current_weight = paired_tokens[i]
            merged_weight = np.mean(merged_weight)
            new_paired_tokens.append((merged_token, merged_weight))
        else:
            new_paired_tokens.append((current_token, current_weight))
            i = i + 1
    words = [
        i[0].replace('▁', '')
        for i in new_paired_tokens
        if i[0] not in ['<cls>', '<sep>', '<pad>']
    ]
    weights = [
        i[1]
        for i in new_paired_tokens
        if i[0] not in ['<cls>', '<sep>', '<pad>']
    ]
    if weighted:
        weights = np.array(weights)
        weights = weights / np.sum(weights)
    return list(zip(words, weights))

# +
def padding_sequence(seq, maxlen, padding = 'post', pad_int = 0):
    # Pad every sequence in `seq` to `maxlen` with `pad_int`, either
    # after ('post') or before ('pre') the existing elements.
    padded_seqs = []
    for s in seq:
        if padding == 'post':
            padded_seqs.append(s + [pad_int] * (maxlen - len(s)))
        if padding == 'pre':
            padded_seqs.append([pad_int] * (maxlen - len(s)) + s)
    return padded_seqs

def xlnet_tokenization(tokenizer, texts):
    """Tokenize a batch of texts into XLNet inputs.

    Each sequence is laid out as [tokens..., <sep>, <cls>] (XLNet puts CLS
    last) and the batch is pre-padded to the longest sequence. Returns
    (input_ids, input_masks, segment_ids, string_tokens); mask value 0
    marks real tokens, 1 marks padding."""
    input_ids, input_masks, segment_ids, s_tokens = [], [], [], []
    for text in texts:
        tokens_a = tokenize_fn(text, tokenizer)
        tokens = []
        segment_id = []
        for token in tokens_a:
            tokens.append(token)
            segment_id.append(SEG_ID_A)
        tokens.append(SEP_ID)
        segment_id.append(SEG_ID_A)
        tokens.append(CLS_ID)
        segment_id.append(SEG_ID_CLS)
        input_id = tokens
        input_mask = [0] * len(input_id)
        input_ids.append(input_id)
        input_masks.append(input_mask)
        segment_ids.append(segment_id)
        s_tokens.append([tokenizer.IdToPiece(i) for i in tokens])
    maxlen = max([len(i) for i in input_ids])
    input_ids = padding_sequence(input_ids, maxlen, padding = 'pre')
    input_masks = padding_sequence(
        input_masks, maxlen, padding = 'pre', pad_int = 1
    )
    segment_ids = padding_sequence(
        segment_ids, maxlen, padding = 'pre', pad_int = SEG_ID_PAD
    )
    return input_ids, input_masks, segment_ids, s_tokens

# -

xlnet_config = xlnet_lib.XLNetConfig(json_path = 'xlnet_cased_L-12_H-768_A-12/xlnet_config.json')
xlnet_checkpoint = 'xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt'

model = Model(
    xlnet_config, sp_model, xlnet_checkpoint, pool_mode = 'last'
)
model._saver.restore(model._sess, xlnet_checkpoint)

model.vectorize('i love u').shape

model.attention('i love u sooo much!', method = 'first')

model.attention('i love u sooo much!', method = 'last')

model.attention('i love u sooo much!', method = 'mean')
visualization/4.xlnet-attention.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Projeto 1: Unsupervised Learning # ## Grupo # Nomes: <NAME>, <NAME> # Ra: , 216179 # # Divisão do trabalho # Todos os algoritmos, resultados e documentos, incluindo este relatório foram produzidos pelo aluno <NAME>. # A escolha do dataset adicional foi feita em conjunto pela Dupla. # # Tratamento dos Dados # # Foram usados dois datasets em nosso projeto, eles podem ser encontrados em: "../data". O primerio foi um dataset de duas dimensões fornecido pela professora, o segundo foi um dataset sobre ataques cardíacos encontrado em [Dataset ataque cardíaco](https://www.kaggle.com/fedesoriano/stroke-prediction-dataset). Em ambos os datasets optou-se por fazer fazer a leitura dos dados e depois seriaizá-los em formato JSON para evitar execuções desnecessárias do algoritmo de recuperação dos dados e por JSON ser mais fácil de recuperar que os formatos fornecidos. O padrão dos objetos JSON será exibido a seguir: # ![teacher data](teacherData.png) # ![stroke data](strokeData.png) # <b>Imagem 1 e 2:</b> Formato dos objetos no JSON serializado # Os notebooks onde este tratamento é feito se encontram em [Teacher Data](../Notebooks/parsingData1_ra216179.ipynb) e [Stroke Data](../Notebooks/parsingData1_ra216179.ipynb). Para o dataset fornecido o tratamento foi apenas verificar se havia dados não numéricos entre os dados corretos, por meio de tratamento de exceção. Para o outro dataset o tratamento será explicado em detalhes mais abaixo. Nesta etapa nenhum dado foi normalizado. # O dataset sobre ataques cardíacos possui 12 features, uma delas, porém, o ID, representa um identificador único de cada indivíduo. 
Essa feature abrange uma gama muito grande de valores que não possui, provavelmente, nenhum significado e não serve para classificação dos elementos, ou seja, não é relevante para a análise e pode prejudicá-la, portanto optou-se por desprezá-la. As outras features e o tipo de dado que contem são: gênero - classificação (Homem, mulher ou outro), idade - inteiro, hipertensão - booleano, doença cardíaca - booleano, já foi casado - booleano, tipo de trabalho - classificação (Criança, desempregado, autônomo, funcionário privado ou funcionário público), tipo de habitação - classificação (Urbana ou Privada), nível médio de glicose - número contínuo, índice de massa corpórea - número contínuo, status de fumo - classificação (nunca fumou, ex-fumante, fumante ou desconhecido), ataque cardíaco - booleano. # Tendo em vista que os métodos de aprendizado não supervisionado utilizados precisam de números para trabalhar, todos os dados que não fossem numéricos precisaram ser adaptados, utilizando uma codificação desenvolvida por um dos membros da equipe que será explicada abaixo # Gênero: # Mulher : 1 # Outro : 2 # Homem : 3 # # Essa decisão foi tomada com base numa ideia de proximidade, Mulher e homem são diferentes e opostos em termos classificatórios já outro, o desenvolvedor da codificação optou por não entrar em discussões que fogem ao escopo do projeto, porém interpretou que está igualmente perto de homem ou mulher, sem privilegiar nenhum. Por isso foi escolhidos os valores referenciados acima # Tipo de trabalho: # Criança : 0 # Desempregado : 1 # Autônomo : 2 # Funcionário privado : 3 # Funcionário público : 4 # # Neste caso, a classificação se deu pelo nível de formalidade da ocupação, quanto mais formal, maior o número, crianças ficam com zero pois não podem trabalhar. Desta forma, tipos de emprego mais próximos, em questão de formalidade, ficam mais próximos e os muito díspares ficam mais distantes entre si. 
# Tipo de habitação: # Rural : 0 # Urbano : 1 # # Essa feature é binária, portanto, 0 e 1 foi a escolha natural para esse caso, qualquer outros dois valores diferentes poderiam ser usados, esses porém mantém a consistência com as dimensões booleanas do dataset. # Status de fumo: # Nunca fumou : 0 # Parou de fumar: 1 # Fuma : 2 # Desconhecido : -1 # # Neste caso, fumantes e não fumantes são opções diametralmente opostas, com ex-fumante estando entre as duas opções por já ter fumado, porém não fumar mais. Há uma quarta opção, desconhecido, que significa que esse dado não está disponível e deixou-se em -1, uma vez que todos os dados são positivos. Essa escolha é consistente com outras dimensões, que optou-se por marcar -1 toda vez que houvesse uma célula vazia. # Para as outras dimensões, manteve-se o número que estava na entrada, apenas tomando o cuidado de verificar se era um valor válido, se não, registrou-se -1. A dimensão "Já foi casado" ao invés de 0 e 1, como nas outras colunas booleanas, tinha "sim e não", que foram devidamente substituídas por 0 e 1, mantendo-se a consistência dos dados. # As leituras nesse dataset foram feitas por meio da biblioteca Pandas, que possui uma série de funções adequadas para leitura e manipulação de dados, o que inclui leitura de arquivos CSV (comma separated values), o formato de entrada deste dataset. # O dataset fornecido foi lido por meio das funções padrão de leitura de arquivos da liguagem Python, devido ao seu formato de dois números por linha separados por espaço, sem cabeçalho. # Ambas as fontes de dados foram separadas em conjuntos de aprendizado e teste de tamanho aproximadamente 90% e 10% respectivamente. Isso se deu por meio de escolha aleatória de 10% dos pontos de cada dataset, algoritmo que mantém a distribuição da amostra retirada. 
Um dos códigos feitos para isso será mostrado abaixo, o código para o outro dataset é análogo e pode ser encontrado junto com todo o código desta etapa do projeto nos notebooks anteriormente anexados. # Um campo cluster, com valor -1, foi adicionado a cada um dos pontos em ambos datasets para utilização em etapas posteriores. try: f = open("../data/cluster.dat") except: print("Houston we've got a problem") dataset = [] testset = [] for i in f: rVar = random.randint(1,10) stringList = i.split() try: dicti = { "x" : float(stringList[0]), "y" : float(stringList[1]), "cluster" : -1 } except ValueError: print("You had a Value Error") break except: print("You got another Error") break if(rVar == 5): testset.append(dicti) else: dataset.append(dicti) f.close() # # Kmeans # O código desta etapa se encontra no notebook [Kmeans](../Notebooks/kMeans_ra216179.ipynb). # A primeira coisa feita nesta etapa foi a leitura dos dados no JSON produzido na etapa anterior. O dicionário lido está num formato facilmente compreensível para humanos, porém difícil de se trabalhar na implementação, portanto para esta etapa ele foi salvo numa nova estrutura de dados, um dicionário com dois campos: cluster, onde fica salvo a lista com o código do cluster de cada ponto respectivamente e o segundo campo é matrix, uma matriz em que cada linha representa uma feature e cada coluna representa o valor da feature para o ponto i. O código será mostrado a seguir. Esta estrutura de dados se provou problemática por não manter todas as dimensões de cada ponto numa estrutura só, mas em listas separadas, o que contraria a interface de muitas das funções das bibliotecas usadas. Ela também não usa np.arrays. e sim listas python, que são muito mais ineficientes e difíceis de trabalhar com as funções da biblioteca Numpy, exigindo que várias conversões fossem feitas ao longo do código, o que é altamente ineficiente. 
Ela foi mantida apenas para não ser necessário refatorar todo o código que já tinha sido feito usando ela, porém foi substituída na etapa seguinte. def getDataset(dataset): cleanDataset = { "matrix" : [], "cluster": [], } for i in dataset[0]: if i != "cluster": cleanDataset["matrix"].append(getList(dataset,i)) elif i == "cluster": cleanDataset["cluster"] = getList(dataset,"cluster") return cleanDataset exist = True try: f = open("../data/teacherData.json","r") stringSet = f.read() f.close() except: print("Arquivo não existe") exist = False if(exist): dataset = json.loads(stringSet) nDataset = getDataset(dataset) plt.scatter(nDataset["matrix"][0],nDataset["matrix"][1]) plt.title("Original Data") plt.xlabel("x") plt.ylabel("y") plt.grid(True) plt.savefig("../images/originalData1.png") # O código da nossa implementação do Kmeans será anexado em seguida. A distância utilizada foi a distância Euclidiana e ass estratégias de inicialização foram Forgy, que escolhe k pontos aleatórios do dataset e os designa com centro de cada um dos clusters e depois executa o algoritmo. Todas as funções auxiliares podem ser encontradas no notebook anteriormente mencionado e não serão anexadas para manter a concisão deste relatório. 
def kmeans(dataset,k):
    """Lloyd's K-means over ``dataset`` (the {"matrix", "cluster"} layout).

    Uses the external helpers ``initialize`` (Forgy), ``getCluster``,
    ``getPoint``, ``getCenter``, ``getClusterPoints`` and ``change``, plus the
    module-level iteration cap ``MAXIT``.  Mutates *dataset* in place, adds a
    "centers" entry and returns it.
    """
    centers = []
    centers = initialize(dataset["matrix"],k)
    # Initial assignment: each point goes to its nearest initial center.
    for i in range(len(dataset["cluster"])):
        dataset["cluster"][i] = getCluster(getPoint(dataset["matrix"],i),centers)
    k = 0  # NOTE: the parameter name is reused here as the iteration counter
    oldCenters = copy.deepcopy(centers)
    while(True):
        # Recompute every center from its current members; a cluster that
        # became empty (getCenter returns None) gets a zero vector so the
        # iteration can proceed.
        for i in range(len(centers)):
            newCenter = getCenter(getClusterPoints(dataset,i))
            if newCenter == None:
                centers[i] = [0]*len(centers[i])
                continue
            centers[i] = newCenter
        # Reassign every point to its nearest updated center.
        for i in range(len(dataset["cluster"])):
            dataset["cluster"][i] = getCluster(getPoint(dataset["matrix"],i),centers)
        k+=1
        if k > MAXIT:
            break  # safety cap against a non-converging run
        elif (not change(centers,oldCenters)) and k > 0:
            break  # converged: no center moved beyond the tolerance
        oldCenters = copy.deepcopy(centers)
    print(k)
    dataset["centers"] = centers
    return dataset

# Este código segue os princípios do Kmeans, ele inicializa o dataset seguindo as estratégias adotadas, então a cada iteração ele encontra novos centros para cada cluster e depois atualiza o cluster de cada ponto, parando quando todos os centros de cada cluster não se moverem mais, dentro de uma margem de erro pré estabelecida. Também foi estabelecido um valor máximo de iterações permitidas, para evitar a situação de um loop infinito no caso de um bug passar despercebido no código
# ## Efeito da normalização
# Observou-se que esse algoritmo é altamente sensível à diferença na ordem de grandeza entre as dimensões.
O algoritmo produziu resultados diferentes do esperado com os dados não normalizados e serão comparados com e sem normalização para o dataset fornecido,
# Mesmo utilizando a inicialização forgy, que é aleatória, obteve-se o mesmo resultado em todas as execuções e também observou-se uma diferença nas quantidades de iterações que podem ser observadas na tabela a seguir:
# K | Não normalizado | Normalizado
# - | --------------- | -----------
# 1 | 2 | 2
# 2 | 9 | 4
# 3 | 8 | 4
# 4 | 12 | 6
# 5 | 14 | 10
# 6 | 8 | 7
# 7 | 10 | 9
# 8 | 21 | 12
# 9 | 10 | 10
# 10| 34 | 10
#
# <b>Tabela 1:</b> Número de iterações
# Para cada k o número de iterações é igual ou menor no normalizado em relação ao não normalizado, chegando a apresentar mais de dez iterações de diferença para alguns ks
# Desta forma é possível perceber que a análise com dados normalizados é uma opção melhor do que com dados não normalizados. A estratégia de normalização está no código a seguir:

def normalize(array):
    """Scale *array* in place by its Euclidean (L2) norm and return it.

    Each element is divided by ``||array||_2``; the input list itself is
    mutated, and the same list object is returned for convenience.
    """
    scale = np.linalg.norm(np.array(array))
    for pos, value in enumerate(array):
        array[pos] = value / scale
    return array

# Este algoritmo é aplicado para cada lista de features do dataset e a normalização é feita dividindo-se cada elemento da lista pela norma dois da lista.
# ## Resultados
# As imagens com alguns resultados da aplicação sobre os dados fornecidos foram apresentados na seção anterior e serão discutidos juntamente com os outros resultados agora.
# ![Original data](../images/originalData1.jpg)
# Os dados fornecidos possuem duas dimensões, aqui chamadas de x e y. Eles estão distribuídos numa faixa de 250 a 3750, aproximadamente em x e de 0 a 30, aproximadamente, em y. Visualmente é possível perceber 3 grupos distintos, que podem representar 3 diferentes clusters após a execução do algoritmo.
# Ao executar o algoritmo, utilizamos o método de Elbow para escolher o melhor valor de K como parâmetro para o algoritmo e assim obter o melhor modelo.
# O código do método de Elbow será anexado a seguir
def elbow(dataset,path,r):
    """Run K-means for k = 1..r, plot the cost curve to *path* and return
    a deep copy of the clustered dataset for every k.

    Depends on ``kmeans``, ``getObjectiveFunction`` and ``plt`` from the
    surrounding notebook.
    """
    ks = []
    Js = []
    results = []
    for k in range(1,r+1):
        dataset = kmeans(dataset,k)
        results.append(copy.deepcopy(dataset))
        # BUG FIX: the cluster count was hard-coded to 3 here, so for k > 3
        # the objective ignored every cluster beyond the third, and for
        # k < 3 it probed nonexistent clusters.  Use the current k.
        Js.append(getObjectiveFunction(k,dataset["centers"],dataset))
        ks.append(k)
        # Reset the assignment so the next k starts from scratch.
        dataset["cluster"] = [-1]*len(dataset["matrix"][0])
    plt.plot(ks,Js,marker = "o")
    plt.title("Elbow Method")
    plt.xlabel("k")
    plt.ylabel("Objective Function")
    plt.grid(True)
    plt.savefig(path)
    return results

def getObjectiveFunction(k,centers,dataset):
    """Sum of squared distances from every point to its cluster center,
    over clusters 0..k-1 (the K-means cost J)."""
    J = 0
    for i in range(k):
        clusterPoints = getClusterPoints(dataset,i)
        for t in clusterPoints:
            J += math.pow(getDistance(t,centers[i]),2)
    return J

# Este método se baseia no cálculo da função objetiva, ou função de custo para cada K. A fórmula que calcula essa função é:
# ![Objective Function](ObjectiveeFunction.png)
# <b>Imagem 8:</b> Fórmula de função objetivo
# O resultado da aplicação deste método para os dados normalizados foi exibido anteriormente e será exibido novamente adiante:
# ![Norm Elbow](../images/Elbow1.jpg)
# Este gráfico mostra que o custo cai conforme K aumenta, no entanto, a partir de um ponto de inflexão (k = 3) a queda do valor de J muda de comportamento, deixa de ser um reta e se torna um valor quase constante, logo, não importa quanto aumentamos K, o custo se mantém praticamente o mesmo. Desta forma, podemos concluir que K = 3 é a melhor opção para o parâmetro K, a opção que melhor se adaptará aos dados, sem provocar o fenômeno de Overfitting.
# O algoritmo foi executado com K = 3, o resultado será exibido a seguir:
# ![Norm result](../images/clusteredData1.jpg)
# O gráfico mostra que o algoritmo conseguiu aglomerar os dados nos três clusters que eram visualmente percebidos, com todos os pontos que aparentemente pertenciam a esses clusters sendo designados para os clusters corretos.
O custo desta execução foi inferior a 0.1, isto significa que a soma das distâncias entre cada ponto e o centro de seu cluster ao quadrado é inferior a 0.1. A quantidade de iterações muda a cada execução devido à inicialização aleatória, porém para esta execução registrada, a quantidade de iterações foi 4, como exibido pela tabela 1.
Este gráfico possui um comportamento um pouco diferente do mostrado na Imagem 7, ele também sofre mais com a aleatoriedade da inicialização, tendo seu resultado diferente a cada execução,
Este resultado não é um bom indicativo da qualidade de cada modelo, porém, pela forma como a distribuição se mantém, é possível inferir que há sim grupos bem definidos de pontos, que se subdividem conforme o k aumenta, isso seria visto mais facilmente se os dados fossem ordenados pela posição de cada cluster. K = 3 apressenta uma distribuição aproximadamente uniforme, isso não possui muito significado, apenas que todos os dados se concentram igualmente distribuídos em 3 macro setores do hiperespaço. Se os dados forem ordenados pela posição do cluster, essa característica poderia também ser observada nos outros gráficos e assim comprovada. # Outro dado que podemos analisar entre cada K é a quantidade de iterações necessárias para que o algoritmo pare, dada a margem de erro pré-estabelecida. Esses valores serão exibidos para os Ks citados anteriormente a seguir: # K | Número de Iterações # --- | ------------------- # 3 | 7 # 6 | 9 # 7 | 9 # 9 | 7 # 25 | 10 # # <b>Tabela 2:</b> Número de iterações por execução com os dados de Ataque cardíaco. # Todas as opções de K também repetiram uma quantidade semelhante de vezes, então esta também não é uma boa métrica para decidir se o modelo está adequado. # # DBScan # O código desta etapa se encontra no notebook [DBScan](../Notebooks/dbscan_ra216179.ipynb). # Como dito anteriormente, a modelagem dos dados feita na etapa do Kmeans se provou ineficiente, ela então foi substituída por um novo formato. O dicionário com dois campos "cluster" e "matrix" permanece, no entanto, o conteúdo de matrix mudou. Agora, cada linha de matrix é um ponto, que é representado por um dicionário que contém os campos: "coord" - um np.array com as coordenadas do ponto, "neigh" - uma lista de vizinhos a esse ponto, "cluster" - o cluster atribuído a este ponto, inicializado com -2, para indicar que ele não foi visitado pelo algoritmo, "pos" - o índice do ponto na matriz. 
# Este modelo se provou mais eficiente por exigir menos conversões para np.array, e facilitar os cálculos, pois muitas das funções necessitavam das coordenadas de cada ponto juntas num único vetor. Essa abordagem facilitou a implementação e provavelmente diminui o tempo de execução.
# O código que faz a leitura e a transformação desses dados será exibido a seguir:
def getDataset(dataset):
    """Convert the JSON list of points into the DBScan layout.

    "matrix" holds one dict per point (built by the external helper
    ``getPoint``: coordinates, neighbour list, cluster, index); "cluster"
    mirrors each point's current cluster code.
    """
    cleanDataset = {
        "matrix" : [],
        "cluster": [],
    }
    for i in range(len(dataset)):
        point = getPoint(dataset[i],i)
        cleanDataset["matrix"].append(point)
        cleanDataset["cluster"].append(dataset[i]["cluster"])
    return cleanDataset

# Load the JSON from the previous stage; `exist` gates the rest of the cell.
exist = True
try:
    f = open("../data/teacherData.json","r")
    stringSet = f.read()
    f.close()
except:
    print("Arquivo não existe")
    exist = False

if(exist):
    dataset = json.loads(stringSet)
    nDataset = getDataset(dataset)
    # Plot the raw points, then L2-normalize each feature in place.
    print2d(nDataset,"../images/originalData1.png")
    nDataset["matrix"] = normalize(nDataset["matrix"])

# O código completo da implementação do algoritmo DBScan, com todas as funções auxiliares, pode ser encontrado no Notebook anexado anteriormente. Em seguida será anexado o código principal da solução. Neste caso a distância escolhida também foi a Euclidiana.
# + def dbscan(dataset,den,ep): corePoints = getNeighs(dataset["matrix"],ep,den) getCluster(dataset["matrix"],dataset["cluster"],corePoints) def getNeighs(dataset,ep,den): corePoints = [] for i in range(len(dataset)): for j in range(i+1,len(dataset)): if getDistance(dataset[i]["coord"],dataset[j]["coord"]) < ep: dataset[i]["neigh"].append(j) dataset[j]["neigh"].append(i) if (len(dataset[i]["neigh"]) + 1) >= den: corePoints.append(i) return corePoints def getCluster(matrix,clusters,corePoints): sumCluster = -1 for i in corePoints: if matrix[i]["cluster"] == -2: sumCluster += 1 matrix[i]["cluster"] = sumCluster clusters[i] = matrix[i]["cluster"] for j in matrix[i]["neigh"]: if matrix[j]["cluster"] > -2: continue matrix[j]["cluster"] = matrix[i]["cluster"] clusters[j] = matrix[i]["cluster"] def cleanOutliers(dataset): clean = [] for i in range(len(dataset["cluster"])): if(dataset["cluster"][i] <= -1): clean.append(i) for i in range(len(clean)): del dataset["cluster"][clean[i]-i] del dataset["matrix"][clean[i]-i] # - # O algoritmo consiste em encontrar todos os vizinhos para cada ponto dentro de um raio epsilon, se um ponto possui mais vizinhos, incluindo ele próprio, que um valor de densidade pré determinado, ele é um core point, se ele não tiver, porém é vizinho de um core point, então ele é um border point, por fim, se ele não tem vizinhos suficientes e não é alcançável por um core point, ele é um outlier. Após isso, toma-se todos os core points, todos os core points que são alcançáveis entre si ficam no mesmo cluster, então percorre-se os vizinhos de cada core point, todo ponto que é alcançável por pelo menos um core point fica no cluster deste core point, os que não são são outliers e são desprezados. 
# A nossa implementação percorre o dataset e para cada ponto, salva o índice dos seus vizinhos na sua lista de vizinhos, construindo um grafo de vizinhanças, então verifica-se quais pontos possuem uma quantidade de vizinhos igual ou superior à densidade e seus índices são adicionados a uma lista de core points. Após essa etapa a lista de core points é percorrida, para cada core point, se ele não foi visitado (cluster = -2) um novo cluster é criado e associado a este core point, se não, ele já possui um cluster associado, então sua lista de vizinhos é percorrida e aqueles que não foram visitados são associados ao cluster deste core point. Por fim, utiliza-se o método cleanOutliers para percorrer o dataset, todo ponto que não possui cluster associado (cluster < 0) é removido do conjunto. # ## Efeito da Normalização # Tendo em vista os resultados e a execução da etapa anterior, essa etapa já executou todo o algoritmo com os dados normalizados. Não se sabe o resultado que seria obtido sem a normalização. No entanto, o DBScan é ainda mais sensível à diferença de grandeza entre as dimensões que o kmeans, uma vez que precisa encontrar a distância entre os pontos dentro de um raio. Pensando nos dados fornecidos, as distâncias no eixo y são da ordem de unidades ou dezenas, as distâncias no eixo x são da ordem de centenas e unidades de milhar. Um círculo não pode ser construído de forma que abranja as duas grandezas, ou o raio será numa ordem de grandez, ou em outra. Desta forma, dependendo da escolha que seja feita, ou todos os pontos, ou maioria, estarão dentro do raio do ponto avaliado, ou nenhum, devido o raio ser grande ou pequeno demais. Como não foi testado, isso não pôde ser observado, é apenas especulação teórica. 
# A estratégia de normalização é a seguinte: def normalize(matrix): lists = [] for i in range(len(matrix[0]["coord"])): lists.append(np.array(getList(matrix,i))) for i in range(len(lists)): norm = np.linalg.norm(lists[i]) lists[i] = lists[i]/norm for i in range(len(lists[0])): for j in range(len(lists)): matrix[i]["coord"][j] = lists[j][i] return matrix # A estratégia é basicamente a mesma da anterior, encontrar a média de uma feature e dividir cada valor por essa média. Porém como o formato dos dados mudou, uma adaptação foi necessária para juntar cada valor numa lista para sua respectiva feature e assim encontrar a norma, que desta vez foi calculada usando o método adequado da biblioteca NumPy. # Os dados normalizados são: # ![Normalized data](../images/test.jpg) # <b>Imagem 16:</b> Dados normalizados # ## Resultados # Para o dataset fornecido pela professora, os dados originais são os mesmos da Imgem 3. # Por possuir 2 hiperparâmetros, o método de Elbow não foi aplicado para avaliação do resultado deste método, uma vez que ele exigiria a exibição de gráficos 3D para sua completa análise. # Para este dataset o epsilon utilizado foi 0,01 e a densidade foi 5. # Para estes dois valores o resultado obtido foi: # ![Resposta DBScan](../images/dbscanClustered1.jpg) # <b>Imagem 17:</b> Resultado DBScan para dados fornecidos. # O resultado obtido foi, aparentemente, o mesmo obtido pelo Kmeans, que visualmente é o resultado correto, o algoritmo encontrou os três clusters que eram esperados e marcou todos os pontos no cluster correto como visualmente esperado. # Para os dados de ataque cardíaco, ainda existe o mesmo problema do anterior, por serem 11 dimensões não é possível plotar um gráfico com esses dados e observar quais clusters deveriam ser mostrados, outras métricas devem ser utilizadas então para avaliar o resultado. 
# A primeira métrica usada para avaliar o resultado foi a quantidade de pontos por cluster, neste caso também inclui a quantidade de clusters, pois este não é pre-definido pelo algoritmo, mas decidido pelo próprio algoritmo. # Serão exibidos os resultados para alguns valores de epsilon e alguns valores de densidade, estes resultados serão discutidos depois. # ![Epsilon = 0.02 Density = 10](../images/dbscanCount2_10_002.jpg) # <b>Imagem 18:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 10 # ![Epsilon = 0.02 Density = 20](../images/dbscanCount2_20_002.jpg) # <b>Imagem 19:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 20 # ![Epsilon = 0.02 Density = 30](../images/dbscanCount2_30_002.jpg) # <b>Imagem 20:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 30 # ![Epsilon = 0.02 Density = 40](../images/dbscanCount2_40_002.jpg) # <b>Imagem 21:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 40 # ![Epsilon = 0.02 Density = 50](../images/dbscanCount2_50_002.jpg) # <b>Imagem 22:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 40 # ![Epsilon = 0.02 Density = 60](../images/dbscanCount2_60_002.jpg) # <b>Imagem 23:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 60 # ![Epsilon = 0.02 Density = 70](../images/dbscanCount2_70_002.jpg) # <b>Imagem 24:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 70 # ![Epsilon = 0.02 Density = 80](../images/dbscanCount2_80_002.jpg) # <b>Imagem 25:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 80 # ![Epsilon = 0.02 Density = 90](../images/dbscanCount2_90_002.jpg) # <b>Imagem 26:</b> Pontos por Cluster - Epsilon = 0.02, Densidade = 90 # ![Epsilon = 0.05 Density = 10](../images/dbscanCount2_10_005.jpg) # <b>Imagem 27:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 10 # ![Epsilon = 0.05 Density = 20](../images/dbscanCount2_20_005.jpg) # <b>Imagem 28:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 20 # ![Epsilon = 0.05 Density = 30](../images/dbscanCount2_30_005.jpg) # <b>Imagem 29:</b> Pontos por 
Cluster - Epsilon = 0.05, Densidade = 30 # ![Epsilon = 0.05 Density = 40](../images/dbscanCount2_40_005.jpg) # <b>Imagem 30:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 40 # ![Epsilon = 0.05 Density = 50](../images/dbscanCount2_50_005.jpg) # <b>Imagem 31:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 40 # ![Epsilon = 0.05 Density = 60](../images/dbscanCount2_60_005.jpg) # <b>Imagem 32:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 60 # ![Epsilon = 0.05 Density = 70](../images/dbscanCount2_70_005.jpg) # <b>Imagem 33:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 70 # ![Epsilon = 0.05 Density = 80](../images/dbscanCount2_80_005.jpg) # <b>Imagem 34:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 80 # ![Epsilon = 0.05 Density = 90](../images/dbscanCount2_90_005.jpg) # <b>Imagem 35:</b> Pontos por Cluster - Epsilon = 0.05, Densidade = 90 # Os resultados foram obtidos testando 10 valores diferentes de densidade para dois diferentes valores de epsilon. O que se observa é que variar a densidade não altera quais clusters foram escolhidos, apenas elimina clusters muito pequenos, que passam a ser considerados como Outliers e os que não são eliminados podem ser ligeiramente diminuídos por perderem seus pontos mais externos, essa mudança, no entanto, não pode ser percebida na escala do gráfico. No entanto, caso um valor muito maior de densidade fosse escolhido eventualmente acabaria fazendo todos os clusters desaparecerem. Por outro lado, a variação escolhida já foi razoavelmente grande e houve poucas mudanças, o que indica que este dataset é bastante denso. Já a alteração do epsilon faz com que os clusters se alteram muito de tamanho, fazendo com que os clusters sejam completamente redimensionados. Aparentemente, o epsilon 0.05 é muito grande e capaz de colocar quase todos os pontos juntos, provavelmente um epsilon um pouco maior produzirá apenas um cluster. 
A alteração de densidade para este Epsilon produz praticamente nenhuma alteração, corroborando a hipótese de que estes clusters são muito densos. Os quatro Cluster encontrados são um indicativo de que grande parte dos pontos estão concentrados em apenas quatro grandes regiões do hiperespaço. O Epsilon 0.02 é aparentemente melhor para análise, é capaz de diferenciar melhor as clusters e produz de 7-9 clusters bastante representativos, como o K-means também produziu, potencialmente os mesmos, além de alguns clusters residuais que desaparecem com o aumento da densidade. # Apesar desta análise preliminar, esses valores não nos trazem nenhuma informação sobre a qualidade do modelo e outras métricas precisam ser usadas para a avaliação.
project1_ra216179/Report/Report_ra216179.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="jU-X4YJBK1zQ" colab_type="text" # #Ejemplo 13: Series temporales con Keras # + [markdown] id="-CYyQMFEK6y-" colab_type="text" # ##Paso 1: Gestión de los datos # + [markdown] id="9SPU8iz4K8cm" colab_type="text" # En primer lugar, se cargan las bibliotecas necesarias para la gestión de datos # + id="FiqalpF5d9HA" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt import pandas as pd # + [markdown] id="6jwAKgZGMQyC" colab_type="text" # **1.1-Carga de datos** # # En este caso, los datos están disponibles como un CSV que se carga desde un directorio. # + id="l0t52WuyeSh3" colab_type="code" outputId="97225608-7515-46db-b1a5-05682d1329ea" executionInfo={"status": "ok", "timestamp": 1554194161415, "user_tz": -120, "elapsed": 29376, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 122} from google.colab import drive drive.mount('/content/drive') # + id="StbI9azxeVuG" colab_type="code" colab={} dataset = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/NSE-TATAGLOBAL.csv") # + id="l_iA8AeCMTq6" colab_type="code" outputId="c5f4b7ea-adde-440d-8ccb-0fbd38d2c39d" executionInfo={"status": "ok", "timestamp": 1554194422619, "user_tz": -120, "elapsed": 7522, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 204} dataset.head() #print(len(dataset)) # + [markdown] id="bDXd6GYNNH8I" colab_type="text" # Se seleccionan las columnas 1 y 2. 
# + id="bVUoyNcGefi_" colab_type="code" colab={} training_set = dataset.iloc[:, 1:2].values # + id="WpxZP0oTek7f" colab_type="code" outputId="81f34276-8139-4bc8-b6dc-1757c715b127" executionInfo={"status": "ok", "timestamp": 1554194462073, "user_tz": -120, "elapsed": 4410, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 153} print(len(training_set)) print(training_set) # + [markdown] id="0Ud9ZkprNb9y" colab_type="text" # **1.2-Visualización de los datos** # # * Se puede comprobar la forma que tienen nuestros datos. # + id="7sIj0mURNiR3" colab_type="code" outputId="8afbc721-a6af-4d71-a211-98838f917d31" executionInfo={"status": "ok", "timestamp": 1554194666068, "user_tz": -120, "elapsed": 2445, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 349} plt.figure(figsize = (15, 5)) plt.plot(dataset.iloc[:, 1].values, label = "Open") plt.plot(dataset.iloc[:, 2].values, label = "High") plt.xlabel("Días") plt.ylabel("") plt.title("Valores por días") plt.legend() plt.show() # + [markdown] id="eEOVfw0JN1lO" colab_type="text" # **1.3-Codificar los datos** # # En este caso los datos son numéricos con lo que sólo requieren procesamiento para escalarlos: # # * Los datos ya son numéricos. # * Se realizar una escacla de los datos con MinMax en la zona 0-1, Una ventaja es que se da estabilidad a los datos pero, un problema es que comprime los datos de entrada entre unos límites empíricos (el máximo y el mínimo de la variable). Esto quiere decir que si existe ruido, se va a ampliar. 
# + id="p77gvmA1ep2N" colab_type="code" colab={}
# Scale the training series into [0, 1]; `sc` is reused later to transform
# the test inputs and to invert the predictions.
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)

# + id="NVxzog1XewHt" colab_type="code" outputId="9872c251-1c05-4144-c5d0-a0abf35653c0" executionInfo={"status": "ok", "timestamp": 1554194834941, "user_tz": -120, "elapsed": 1058, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
training_set_scaled

# + [markdown] id="1DfNifTSP2Fo" colab_type="text"
# **1.4-Seleccionar los datos**
#
# En este caso, los datos se separan por la ventana que queremos controlar (60 días).
#

# + id="iMNUSFYQfGU1" colab_type="code" colab={}
# Sliding-window supervision: each sample is the previous `window_size`
# scaled values; the target is the value that follows the window.
window_size = 60
X_train = [] #Lista de listas de window_size observaciones
y_train = [] #Lista de valores
for i in range(window_size, len(training_set)):
    # BUG FIX: the slice below hard-coded 60 instead of window_size, so
    # changing window_size would silently produce inconsistent windows.
    X_train.append(training_set_scaled[i-window_size:i, 0]) #Take [0:60,0] [60:120,0] se generan listas de observaciones
    y_train.append(training_set_scaled[i, 0])#Take [60,0] [120,0]
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshape to (samples, timesteps, features=1) as the LSTM expects.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
print(len(X_train[0]))

# + [markdown] id="_2P05YpvO-lr" colab_type="text"
# ## Paso 2: Arquitectura e implementación de nuestra red
#
#
# 1. La entrada de nuestra red será una capa con la forma de los datos de entrada.
# 2. La función de activación en la capa de salida se establece para que sea un número.
# 4. La función de pérdida será **mse**.
# 5. La función de optimización **adam**.
# # + id="IMSKvrIve5A9" colab_type="code" colab={} from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Dropout # + id="gCDCwBoge8GN" colab_type="code" colab={} regressor = Sequential() regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1))) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) regressor.add(LSTM(units = 50)) regressor.add(Dropout(0.2)) regressor.add(Dense(units = 1)) regressor.compile(optimizer = 'adam', loss = 'mean_squared_error') # + [markdown] id="Ls4KjKqrQkf-" colab_type="text" # ## Paso 3: Entrenamiento # + id="hUJfRcfEm1W3" colab_type="code" outputId="396d7588-a1dc-4a3f-ea7a-1314ea5d30ca" executionInfo={"status": "ok", "timestamp": 1554202233643, "user_tz": -120, "elapsed": 61526, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 374} from matplotlib import pyplot history = regressor.fit(X_train, y_train, epochs = 10, batch_size = 64) print(history) # + id="doznEBj2rESP" colab_type="code" outputId="a6656412-8d8a-4757-9bfb-024f7a78c95c" executionInfo={"status": "ok", "timestamp": 1554202281391, "user_tz": -120, "elapsed": 1197, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 54} print(history.history) # + [markdown] id="1zNchIcTQqF2" colab_type="text" # ## Paso 4: Test y Predicción # + [markdown] id="XjHlYIAQQoOm" colab_type="text" # En este caso, se va a validar con el conjunto de test: # # # * Se pasa como parámetro el 
# modelo entrenado.
# * Se pasan los valores de entrada y los esperados de salida (X,Y)
# * Se calculan valores para los datos de entrenamiento y de test.
# * Se calcula RMSE (error cuadrático medio). Se busca penalizar tanto los valores por defecto como por exceso. El valor preferente es pequeño indicando que los valores pronosticados están cerca de los valores observados.
#

# + id="9RZBdF5ifWVM" colab_type="code" colab={}
dataset_test = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/tatatest.csv")

# + id="SYx97EVuffkd" colab_type="code" outputId="56595d48-0c00-4457-bf6e-4069625e8df1" executionInfo={"status": "ok", "timestamp": 1554201373609, "user_tz": -120, "elapsed": 1839, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 289}
real_stock_price = dataset_test.iloc[:, 1:2].values
print(real_stock_price)#Se obtienen los valores reales

# + id="Lf-OtAh7fhh0" colab_type="code" colab={}
#Se crea un frame con dos columnas: los valores de entrenamiento y de test
dataset_total = pd.concat((dataset['Open'], dataset_test['Open']), axis = 0)
# Each test prediction needs the 60 previous values, so take the tail of the
# combined series starting 60 points before the test segment.
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
print(inputs)
#Se preparan los datos en la forma adecuada
inputs = inputs.reshape(-1,1) #-1-->la forma se infiere
inputs = sc.transform(inputs) #se escalan los datos con el scaler ajustado en entrenamiento
X_test = [] #se generan la lista de lista de observaciones
for i in range(60, 76):
    X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test) #Se transforma a un array de numpy
# (samples, timesteps, features=1), same layout the network was trained on.
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)#Se realiza la predicción obteniendo un array de valores.
print(predicted_stock_price) predicted_stock_price = sc.inverse_transform(predicted_stock_price) #Se realiza la inversa para obtener el valor real (previamente se habían normalizado los valores de entrada) print(predicted_stock_price) # + id="GRp34rgpoFxe" colab_type="code" outputId="b08f721b-370b-4890-e817-c10584e34d39" executionInfo={"status": "ok", "timestamp": 1554201670908, "user_tz": -120, "elapsed": 5353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 578} import math from sklearn.metrics import mean_squared_error print("Valores reales: "+str(real_stock_price)) print("Valores predicción: "+str(predicted_stock_price)) rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price)) print("Test data score: %.2f RMSE" % rmse) # + id="4_Pk3aLQfpnT" colab_type="code" outputId="64cd405f-a4be-4cd5-95fe-57ac4080ef0c" executionInfo={"status": "ok", "timestamp": 1554201426811, "user_tz": -120, "elapsed": 3210, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 376} plt.plot(real_stock_price, color = 'black', label = 'TATA Stock Price') plt.plot(predicted_stock_price, color = 'green', label = 'Predicted TATA Stock Price') plt.title('TATA Stock Price Prediction') plt.xlabel('Time') plt.ylabel('TATA Stock Price') plt.legend() plt.show()
intro-deep-learning-es/Ejemplo-13.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CNTK 201B: Hands On Labs Image Recognition # This hands-on lab shows how to implement image recognition task using [convolution network][] with CNTK v2 Python API. You will start with a basic feedforward CNN architecture in order to classify Cifar dataset, then you will keep adding advanced feature to your network. Finally, you will implement a VGG net and residual net similar to the one that won ImageNet competition but smaller in size. # # [convolution network]:https://en.wikipedia.org/wiki/Convolutional_neural_network # # ## Introduction # # In this hands-on, you will practice the following: # # * Understanding subset of CNTK python API needed for image classification task. # * Write a custom convolution network to classify Cifar dataset. # * Modifying the network structure by adding: # * [Dropout][] layer. # * Batchnormalization layer. # * Implement a [VGG][] style network. # * Introduction to Residual Nets (RESNET). # * Implement and train [RESNET network][]. # # [RESNET network]:https://github.com/Microsoft/CNTK/wiki/Hands-On-Labs-Image-Recognition # [VGG]:http://www.robots.ox.ac.uk/~vgg/research/very_deep/ # [Dropout]:https://en.wikipedia.org/wiki/Dropout_(neural_networks) # # ## Prerequisites # # CNTK 201A hands-on lab, in which you will download and prepare Cifar dataset is a prerequisites for this lab. This tutorial depends on CNTK v2, so before starting this lab you will need to install CNTK v2. Furthermore, all the tutorials in this lab are done in python, therefore, you will need a basic knowledge of Python. # # CNTK 102 lab is recommended but not a prerequisites for this tutorials. However, a basic understanding of Deep Learning is needed. 
# # ## Dataset # # You will use Cifar 10 dataset, from https://www.cs.toronto.edu/~kriz/cifar.html, during this tutorials. The dataset contains 50000 training images and 10000 test images, all images are 32x32x3. Each image is classified as one of 10 classes as shown below: # # <img src="https://cntk.ai/jup/201/cifar-10.png", width=500, height=500> # # The above image is from: https://www.cs.toronto.edu/~kriz/cifar.html # # ## Convolution Neural Network (CNN) # # Convolution Neural Network (CNN) is a feedforward network comprise of a bunch of layers in such a way that the output of one layer is fed to the next layer (There are more complex architecture that skip layers, we will discuss one of those at the end of this lab). Usually, CNN start with alternating between convolution layer and pooling layer (downsample), then end up with fully connected layer for the classification part. # # ### Convolution layer # # Convolution layer consist of multiple 2D convolution kernels applied on the input image or the previous layer, each convolution kernel output a feature map. # # <img src="https://cntk.ai/jup/201/Conv2D.png"> # # The stack of feature maps output are the input to the next layer. # # <img src="https://cntk.ai/jup/201/Conv2DFeatures.png"> # # > Gradient-Based Learning Applied to Document Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998 # > <NAME>, <NAME>, <NAME> and <NAME> # # #### In CNTK: # # Here the [convolution][] layer in Python: # # ```python # def Convolution(filter_shape, # e.g. (3,3) # num_filters, # e.g. 64 # activation, # relu or None...etc. # init, # Random initialization # pad, # True or False # strides) # strides e.g. (1,1) # ``` # # [convolution]:https://www.cntk.ai/pythondocs/layerref.html#convolution # # ### Pooling layer # # In most CNN vision architecture, each convolution layer is succeeded by a pooling layer, so they keep alternating until the fully connected layer. 
# # The purpose of the pooling layer is as follow: # # * Reduce the dimensionality of the previous layer, which speed up the network. # * Provide a limited translation invariant. # # Here an example of max pooling with a stride of 2: # # <img src="https://cntk.ai/jup/201/MaxPooling.png", 200,200> # # #### In CNTK: # # Here the [pooling][] layer in Python: # # ```python # # # Max pooling # def MaxPooling(filter_shape, # e.g. (3,3) # strides, # (2,2) # pad) # True or False # # # Average pooling # def AveragePooling(filter_shape, # e.g. (3,3) # strides, # (2,2) # pad) # True or False # ``` # # [pooling]:https://www.cntk.ai/pythondocs/layerref.html#maxpooling-averagepooling # # ### Dropout layer # # Dropout layer takes a probability value as an input, the value is called the dropout rate. Let's say the dropu rate is 0.5, what this layer does it pick at random 50% of the nodes from the previous layer and drop them out of the nework. This behavior help regularize the network. # # > Dropout: A Simple Way to Prevent Neural Networks from Overfitting # > <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # # #### In CNTK: # # Dropout layer in Python: # # ```python # # # Dropout # def Dropout(prob) # dropout rate e.g. 0.5 # ``` # # ### Batch normalization (BN) # # Batch normalization is a way to make the input to each layer has zero mean and unit variance. BN help the network converge faster and keep the input of each layer around zero. BN has two learnable parameters called gamma and beta, the purpose of those parameters is for the network to decide for itself if the normalized input is what is best or the raw input. 
# # > Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift # > <NAME>, <NAME> # # #### In CNTK: # # [Batch normalization][] layer in Python: # # ```python # # # Batch normalization # def BatchNormalization(map_rank) # For image map_rank=1 # ``` # # [Batch normalization]:https://www.cntk.ai/pythondocs/layerref.html#batchnormalization-layernormalization-stabilizer # # ## Computational Network Toolkit (CNTK) # # CNTK is a highly flexible computation graphs, each node take inputs as tensors and produce tensors as the result of the computation. Each node is exposed in Python API, which give you the flexibility of creating any custom graphs, you can also define your own node in Python or C++ using CPU, GPU or both. # # For Deep learning, you can use the low level API directly or you can use CNTK layered API. We will start with the low level API, then switch to the layered API in this lab. # # So let's first import the needed modules for this lab. # + from __future__ import print_function import os import numpy as np import matplotlib.pyplot as plt import math from cntk.blocks import default_options from cntk.layers import Convolution, MaxPooling, AveragePooling, Dropout, BatchNormalization, Dense from cntk.models import Sequential, LayerStack from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs from cntk.initializer import glorot_uniform, he_normal from cntk import Trainer from cntk.learner import momentum_sgd, learning_rate_schedule, UnitType, momentum_as_time_constant_schedule from cntk.ops import cross_entropy_with_softmax, classification_error, relu, input_variable, softmax, element_times from cntk.utils import * # - # Now that we imported the needed modules, let's implement our first CNN, as shown below: # # <img src="https://cntk.ai/jup/201/CNN.png"> # # Let's implement the above network using CNTK layer API: def create_basic_model(input, out_dims): net = Convolution((5,5), 32, init=glorot_uniform(), 
activation=relu, pad=True)(input) net = MaxPooling((3,3), strides=(2,2))(net) net = Convolution((5,5), 32, init=glorot_uniform(), activation=relu, pad=True)(net) net = MaxPooling((3,3), strides=(2,2))(net) net = Convolution((5,5), 64, init=glorot_uniform(), activation=relu, pad=True)(net) net = MaxPooling((3,3), strides=(2,2))(net) net = Dense(64, init=glorot_uniform())(net) net = Dense(out_dims, init=glorot_uniform(), activation=None)(net) return net # To train the above model we need two things: # * Read the training images and their corresponding labels. # * Define a cost function, compute the cost for each mini-batch and update the model weights according to the cost value. # # To read the data in CNTK, we will use CNTK readers which handle data augmentation and can fetch data in parallel. # # Example of a map text file: # # S:\data\CIFAR-10\train\00001.png 9 # S:\data\CIFAR-10\train\00002.png 9 # S:\data\CIFAR-10\train\00003.png 4 # S:\data\CIFAR-10\train\00004.png 1 # S:\data\CIFAR-10\train\00005.png 1 # # + # model dimensions image_height = 32 image_width = 32 num_channels = 3 num_classes = 10 # # Define the reader for both training and evaluation action. 
# def create_reader(map_file, mean_file, train): if not os.path.exists(map_file) or not os.path.exists(mean_file): raise RuntimeError("This tutorials depends 201A tutorials, please run 201A first.") # transformation pipeline for the features has jitter/crop only when training transforms = [] if train: transforms += [ ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio') # train uses jitter ] transforms += [ ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'), ImageDeserializer.mean(mean_file) ] # deserializer return MinibatchSource(ImageDeserializer(map_file, StreamDefs( features = StreamDef(field='image', transforms=transforms), # first column in map file is referred to as 'image' labels = StreamDef(field='label', shape=num_classes) # and second as 'label' ))) # - # Now let us write the the training and validation loop. # # Train and evaluate the network. # def train_and_evaluate(reader_train, reader_test, max_epochs, model_func): # Input variables denoting the features and label data input_var = input_variable((num_channels, image_height, image_width)) label_var = input_variable((num_classes)) # Normalize the input feature_scale = 1.0 / 256.0 input_var_norm = element_times(feature_scale, input_var) # apply model to input z = model_func(input_var_norm, out_dims=10) # # Training action # # loss and metric ce = cross_entropy_with_softmax(z, label_var) pe = classification_error(z, label_var) # training config epoch_size = 50000 minibatch_size = 64 # Set training parameters lr_per_minibatch = learning_rate_schedule([0.01]*10 + [0.003]*10 + [0.001], UnitType.minibatch, epoch_size) momentum_time_constant = momentum_as_time_constant_schedule(-minibatch_size/np.log(0.9)) l2_reg_weight = 0.001 # trainer object learner = momentum_sgd(z.parameters, lr = lr_per_minibatch, momentum = momentum_time_constant, l2_regularization_weight=l2_reg_weight) trainer = Trainer(z, ce, pe, [learner]) # define 
mapping from reader streams to network inputs input_map = { input_var: reader_train.streams.features, label_var: reader_train.streams.labels } log_number_of_parameters(z) ; print() progress_printer = ProgressPrinter(tag='Training') # perform model training batch_index = 0 plot_data = {'batchindex':[], 'loss':[], 'error':[]} for epoch in range(max_epochs): # loop over epochs sample_count = 0 while sample_count < epoch_size: # loop over minibatches in the epoch data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count), input_map=input_map) # fetch minibatch. trainer.train_minibatch(data) # update model with it sample_count += data[label_var].num_samples # count samples processed so far # For visualization... plot_data['batchindex'].append(batch_index) plot_data['loss'].append(trainer.previous_minibatch_loss_average) plot_data['error'].append(trainer.previous_minibatch_evaluation_average) progress_printer.update_with_trainer(trainer, with_metric=True) # log progress batch_index += 1 progress_printer.epoch_summary(with_metric=True) # # Evaluation action # epoch_size = 10000 minibatch_size = 16 # process minibatches and evaluate the model metric_numer = 0 metric_denom = 0 sample_count = 0 minibatch_index = 0 while sample_count < epoch_size: current_minibatch = min(minibatch_size, epoch_size - sample_count) # Fetch next test min batch. data = reader_test.next_minibatch(current_minibatch, input_map=input_map) # minibatch data to be trained with metric_numer += trainer.test_minibatch(data) * current_minibatch metric_denom += current_minibatch # Keep track of the number of samples processed so far. 
sample_count += data[label_var].num_samples minibatch_index += 1 print("") print("Final Results: Minibatch[1-{}]: errs = {:0.1f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom)) print("") # Visualize training result: window_width = 32 loss_cumsum = np.cumsum(np.insert(plot_data['loss'], 0, 0)) error_cumsum = np.cumsum(np.insert(plot_data['error'], 0, 0)) # Moving average. plot_data['batchindex'] = np.insert(plot_data['batchindex'], 0, 0)[window_width:] plot_data['avg_loss'] = (loss_cumsum[window_width:] - loss_cumsum[:-window_width]) / window_width plot_data['avg_error'] = (error_cumsum[window_width:] - error_cumsum[:-window_width]) / window_width plt.figure(1) plt.subplot(211) plt.plot(plot_data["batchindex"], plot_data["avg_loss"], 'b--') plt.xlabel('Minibatch number') plt.ylabel('Loss') plt.title('Minibatch run vs. Training loss ') plt.show() plt.subplot(212) plt.plot(plot_data["batchindex"], plot_data["avg_error"], 'r--') plt.xlabel('Minibatch number') plt.ylabel('Label Prediction Error') plt.title('Minibatch run vs. Label Prediction Error ') plt.show() return softmax(z) # + data_path = os.path.join('data', 'CIFAR-10') reader_train = create_reader(os.path.join(data_path, 'train_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), True) reader_test = create_reader(os.path.join(data_path, 'test_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), False) pred = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model) # - # Although, this model is very simple, it still has too much code, we can do better. 
Here the same model in more terse format: def create_basic_model_terse(input, out_dims): with default_options(activation=relu): model = Sequential([ LayerStack(3, lambda i: [ Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True), MaxPooling((3,3), strides=(2,2)) ]), Dense(64, init=glorot_uniform()), Dense(out_dims, init=glorot_uniform(), activation=None) ]) return model(input) pred_basic_model = train_and_evaluate(reader_train, reader_test, max_epochs=10, model_func=create_basic_model_terse) # Now that we have a trained model, let's classify the following image: # # <img src="https://cntk.ai/jup/201/00014.png", width=64, height=64> # # + from PIL import Image def eval(pred_op, image_path): label_lookup = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"] image_mean = 133.0 image_data = np.array(Image.open(image_path), dtype=np.float32) image_data -= image_mean image_data = np.ascontiguousarray(np.transpose(image_data, (2, 0, 1))) result = np.squeeze(pred_op.eval({pred_op.arguments[0]:[image_data]})) # Return top 3 results: top_count = 3 result_indices = (-np.array(result)).argsort()[:top_count] print("Top 3 predictions:") for i in range(top_count): print("\tLabel: {:10s}, confidence: {:.2f}%".format(label_lookup[result_indices[i]], result[result_indices[i]] * 100)) # - eval(pred_basic_model, "data/CIFAR-10/test/00014.png") # Adding dropout layer, with drop rate of 0.25, before the last dense layer: def create_basic_model_with_dropout(input, out_dims): with default_options(activation=relu): model = Sequential([ LayerStack(3, lambda i: [ Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True), MaxPooling((3,3), strides=(2,2)) ]), Dense(64, init=glorot_uniform()), Dropout(0.25), Dense(out_dims, init=glorot_uniform(), activation=None) ]) return model(input) pred_basic_model_dropout = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model_with_dropout) # Add batch 
normalization after each convolution and before the last dense layer: def create_basic_model_with_batch_normalization(input, out_dims): with default_options(activation=relu): model = Sequential([ LayerStack(3, lambda i: [ Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True), BatchNormalization(map_rank=1), MaxPooling((3,3), strides=(2,2)) ]), Dense(64, init=glorot_uniform()), BatchNormalization(map_rank=1), Dense(out_dims, init=glorot_uniform(), activation=None) ]) return model(input) pred_basic_model_bn = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model_with_batch_normalization) # Let's implement an inspired VGG style network, using layer API, here the architecture: # # | VGG9 | # | ------------- | # | conv3-64 | # | conv3-64 | # | max3 | # | | # | conv3-96 | # | conv3-96 | # | max3 | # | | # | conv3-128 | # | conv3-128 | # | max3 | # | | # | FC-1024 | # | FC-1024 | # | | # | FC-10 | # def create_vgg9_model(input, out_dims): with default_options(activation=relu): model = Sequential([ LayerStack(3, lambda i: [ Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True), Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True), MaxPooling((3,3), strides=(2,2)) ]), LayerStack(2, lambda : [ Dense(1024, init=glorot_uniform()) ]), Dense(out_dims, init=glorot_uniform(), activation=None) ]) return model(input) pred_vgg = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_vgg9_model) # ### Residual Network (ResNet) # # One of the main problem of a Deep Neural Network is how to propagate the error all the way to the first layer. For a deep network, the gradient keep getting smaller until it has no effect on the network weights. 
[ResNet](https://arxiv.org/abs/1512.03385) was designed to overcome such problem, by defining a block with identity path, as shown below: # # <img src="https://cntk.ai/jup/201/ResNetBlock2.png"> # # The idea of the above block is 2 folds: # # * During back propagation the gradient have a path that doesn't affect its magnitude. # * The network need to learn residual mapping (delta to x). # # So let's implements ResNet blocks using CNTK: # # ResNetNode ResNetNodeInc # | | # +------+------+ +---------+----------+ # | | | | # V | V V # +----------+ | +--------------+ +----------------+ # | Conv, BN | | | Conv x 2, BN | | SubSample, BN | # +----------+ | +--------------+ +----------------+ # | | | | # V | V | # +-------+ | +-------+ | # | ReLU | | | ReLU | | # +-------+ | +-------+ | # | | | | # V | V | # +----------+ | +----------+ | # | Conv, BN | | | Conv, BN | | # +----------+ | +----------+ | # | | | | # | +---+ | | +---+ | # +--->| + |<---+ +------>+ + +<-------+ # +---+ +---+ # | | # V V # +-------+ +-------+ # | ReLU | | ReLU | # +-------+ +-------+ # | | # V V # # + from cntk.ops import combine, times, element_times, AVG_POOLING def convolution_bn(input, filter_size, num_filters, strides=(1,1), init=he_normal(), activation=relu): if activation is None: activation = lambda x: x r = Convolution(filter_size, num_filters, strides=strides, init=init, activation=None, pad=True, bias=False)(input) r = BatchNormalization(map_rank=1)(r) r = activation(r) return r def resnet_basic(input, num_filters): c1 = convolution_bn(input, (3,3), num_filters) c2 = convolution_bn(c1, (3,3), num_filters, activation=None) p = c2 + input return relu(p) def resnet_basic_inc(input, num_filters): c1 = convolution_bn(input, (3,3), num_filters, strides=(2,2)) c2 = convolution_bn(c1, (3,3), num_filters, activation=None) s = convolution_bn(input, (1,1), num_filters, strides=(2,2), activation=None) p = c2 + s return relu(p) def resnet_basic_stack(input, num_filters, num_stack): assert 
(num_stack > 0) r = input for _ in range(num_stack): r = resnet_basic(r, num_filters) return r # - # Let's write the full model: def create_resnet_model(input, out_dims): conv = convolution_bn(input, (3,3), 16) r1_1 = resnet_basic_stack(conv, 16, 3) r2_1 = resnet_basic_inc(r1_1, 32) r2_2 = resnet_basic_stack(r2_1, 32, 2) r3_1 = resnet_basic_inc(r2_2, 64) r3_2 = resnet_basic_stack(r3_1, 64, 2) # Global average pooling pool = AveragePooling(filter_shape=(8,8), strides=(1,1))(r3_2) net = Dense(out_dims, init=he_normal(), activation=None)(pool) return net pred_resnet = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_resnet_model)
Tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Read FR24 Points File # Reads a raw FR24 Points 'csv' file into a panadas DataFrame. # # The EVENT_TIME field is parsed and the DataFrame is indexed and sorted by FLIGHT_ID and EVENT_TIME import numpy as np import pandas as pd import datetime filename = input('filename: ') points_df = pd.read_csv(filename, parse_dates =['EVENT_TIME'], usecols =['FLIGHT_ID', 'EVENT_TIME', 'LAT', 'LON', 'TRACK_GND', 'ALT', 'SPEED', 'SQUAWK', 'RADAR_ID', 'ON_GROUND', 'VERT_SPEED'], index_col=['FLIGHT_ID', 'EVENT_TIME']) len(points_df) points_df.sort_index(inplace=True) points_df.head() # ## Get the FLIGHT_IDs flight_ids = points_df.index.levels[0] len(flight_ids) flight_ids[:100] # ## Find the entries for a given FLIGHT_ID flight1_id = input('flight_id: ') flight_1_points = points_df.loc[flight1_id] len(flight_1_points) flight_1_points
notebooks/read_raw_data_files/Read FR24 Points File.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- #import matplotlib library import matplotlib.pyplot as plt from matplotlib import style # %matplotlib inline #website traffic data #number of users/visitors on the web site web_customers = [123,645,950,1290,1630,1450,1034,1295,465,205,80] #Time distribution (hourly) time_hrs = [7,8,9,10,11,12,13,14,15,16,17] # + #select the style of the plot style.use('ggplot') #plot the website traffic data (X-axis hrs and Y-axis as number of users) plt.plot(time_hrs,web_customers,alpha=0.4) #set the title of the plot plt.title('Web site traffic') #Annotate plt.annotate('Max',ha='center',va='bottom',xytext=(8,1500),xy=(11,1630),arrowprops={'facecolor':'green'}) #set label for x axis plt.xlabel('Hrs') #set label for y axis plt.ylabel('Number of users') plt.legend() plt.show() # -
plot_with_xy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python3 # name: python3 # --- # + [markdown] id="view-in-github" # <a href="https://colab.research.google.com/github/apache/beam/blob/master//Users/dcavazos/src/beam/examples/notebooks/documentation/transforms/python/element-wise/flatmap-py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a> # + [markdown] id="view-the-docs-top" # <table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/flatmap"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table> # + cellView="form" id="_-code" #@title Licensed under the Apache License, Version 2.0 (the "License") # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# + [markdown] id="flatmap" # # FlatMap # # <script type="text/javascript"> # localStorage.setItem('language', 'language-py') # </script> # # </p><table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.FlatMap"> # <img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> # Pydoc # </a> # </td> # </table> # <br/><br/><br/> # # Applies a simple 1-to-many mapping function over each element in the collection. # The many elements are flattened into the resulting collection. # + [markdown] id="setup" # ## Setup # # To run a code cell, you can click the **Run cell** button at the top left of the cell, # or select it and press **`Shift+Enter`**. # Try modifying a code cell and re-running it to see what happens. # # > To learn more about Colab, see # > [Welcome to Colaboratory!](https://colab.sandbox.google.com/notebooks/welcome.ipynb). # # First, let's install the `apache-beam` module. # + id="setup-code" # !pip install --quiet -U apache-beam # + [markdown] id="examples" # ## Examples # # In the following examples, we create a pipeline with a `PCollection` of produce with their icon, name, and duration. # Then, we apply `FlatMap` in multiple ways to yield zero or more elements per each input element into the resulting `PCollection`. # # `FlatMap` accepts a function that returns an `iterable`, # where each of the output `iterable`'s elements is an element of the resulting `PCollection`. # + [markdown] id="example-1-flatmap-with-a-predefined-function" # ### Example 1: FlatMap with a predefined function # # We use the function `str.split` which takes a single `str` element and outputs a `list` of `str`s. # This pipeline splits the input element using whitespaces, creating a list of zero or more elements. 
# + id="example-1-flatmap-with-a-predefined-function-code" import apache_beam as beam with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry 🥕Carrot 🍆Eggplant', '🍅Tomato 🥔Potato', ]) | 'Split words' >> beam.FlatMap(str.split) | beam.Map(print) ) # + [markdown] id="example-1-flatmap-with-a-predefined-function-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-2-flatmap-with-a-function" # ### Example 2: FlatMap with a function # # We define a function `split_words` which splits an input `str` element using the delimiter `','` and outputs a `list` of `str`s. # + id="example-2-flatmap-with-a-function-code" import apache_beam as beam def split_words(text): return text.split(',') with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap(split_words) | beam.Map(print) ) # + [markdown] id="example-2-flatmap-with-a-function-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-3-flatmap-with-a-lambda-function" # ### Example 3: FlatMap with a lambda function # # For this example, we want to flatten a `PCollection` of lists of `str`s into a `PCollection` of 
`str`s. # Each input element is already an `iterable`, where each element is what we want in the resulting `PCollection`. # We use a lambda function that returns the same input element it received. # + id="example-3-flatmap-with-a-lambda-function-code" import apache_beam as beam with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'], ['🍅Tomato', '🥔Potato'], ]) | 'Flatten lists' >> beam.FlatMap(lambda elements: elements) | beam.Map(print) ) # + [markdown] id="example-3-flatmap-with-a-lambda-function-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-4-flatmap-with-a-generator" # ### Example 4: FlatMap with a generator # # For this example, we want to flatten a `PCollection` of lists of `str`s into a `PCollection` of `str`s. # We use a generator to iterate over the input list and yield each of the elements. # Each yielded result in the generator is an element in the resulting `PCollection`. 
# + id="example-4-flatmap-with-a-generator-code" import apache_beam as beam def generate_elements(elements): for element in elements: yield element with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'], ['🍅Tomato', '🥔Potato'], ]) | 'Flatten lists' >> beam.FlatMap(generate_elements) | beam.Map(print) ) # + [markdown] id="example-4-flatmap-with-a-generator-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-5-flatmaptuple-for-key-value-pairs" # ### Example 5: FlatMapTuple for key-value pairs # # If your `PCollection` consists of `(key, value)` pairs, # you can use `FlatMapTuple` to unpack them into different function arguments. 
# + id="example-5-flatmaptuple-for-key-value-pairs-code" import apache_beam as beam def format_plant(icon, plant): if icon: yield '{}{}'.format(icon, plant) with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ ('🍓', 'Strawberry'), ('🥕', 'Carrot'), ('🍆', 'Eggplant'), ('🍅', 'Tomato'), ('🥔', 'Potato'), (None, 'Invalid'), ]) | 'Format' >> beam.FlatMapTuple(format_plant) | beam.Map(print) ) # + [markdown] id="example-5-flatmaptuple-for-key-value-pairs-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-6-flatmap-with-multiple-arguments" # ### Example 6: FlatMap with multiple arguments # # You can pass functions with multiple arguments to `FlatMap`. # They are passed as additional positional arguments or keyword arguments to the function. # # In this example, `split_words` takes `text` and `delimiter` as arguments. 
# + id="example-6-flatmap-with-multiple-arguments-code" import apache_beam as beam def split_words(text, delimiter=None): return text.split(delimiter) with beam.Pipeline() as pipeline: plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap(split_words, delimiter=',') | beam.Map(print) ) # + [markdown] id="example-6-flatmap-with-multiple-arguments-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-7-flatmap-with-side-inputs-as-singletons" # ### Example 7: FlatMap with side inputs as singletons # # If the `PCollection` has a single value, such as the average from another computation, # passing the `PCollection` as a *singleton* accesses that value. # # In this example, we pass a `PCollection` the value `','` as a singleton. # We then use that value as the delimiter for the `str.split` method. 
# + id="example-7-flatmap-with-side-inputs-as-singletons-code" import apache_beam as beam with beam.Pipeline() as pipeline: delimiter = pipeline | 'Create delimiter' >> beam.Create([',']) plants = ( pipeline | 'Gardening plants' >> beam.Create([ '🍓Strawberry,🥕Carrot,🍆Eggplant', '🍅Tomato,🥔Potato', ]) | 'Split words' >> beam.FlatMap( lambda text, delimiter: text.split(delimiter), delimiter=beam.pvalue.AsSingleton(delimiter), ) | beam.Map(print) ) # + [markdown] id="example-7-flatmap-with-side-inputs-as-singletons-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="example-8-flatmap-with-side-inputs-as-iterators" # ### Example 8: FlatMap with side inputs as iterators # # If the `PCollection` has multiple values, pass the `PCollection` as an *iterator*. # This accesses elements lazily as they are needed, # so it is possible to iterate over large `PCollection`s that won't fit into memory. 
# + id="example-8-flatmap-with-side-inputs-as-iterators-code" import apache_beam as beam def normalize_and_validate_durations(plant, valid_durations): plant['duration'] = plant['duration'].lower() if plant['duration'] in valid_durations: yield plant with beam.Pipeline() as pipeline: valid_durations = pipeline | 'Valid durations' >> beam.Create([ 'annual', 'biennial', 'perennial', ]) valid_plants = ( pipeline | 'Gardening plants' >> beam.Create([ {'icon': '🍓', 'name': 'Strawberry', 'duration': 'Perennial'}, {'icon': '🥕', 'name': 'Carrot', 'duration': 'BIENNIAL'}, {'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'}, {'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'}, {'icon': '🥔', 'name': 'Potato', 'duration': 'unknown'}, ]) | 'Normalize and validate durations' >> beam.FlatMap( normalize_and_validate_durations, valid_durations=beam.pvalue.AsIter(valid_durations), ) | beam.Map(print) ) # + [markdown] id="example-8-flatmap-with-side-inputs-as-iterators-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # # > **Note**: You can pass the `PCollection` as a *list* with `beam.pvalue.AsList(pcollection)`, # > but this requires that all the elements fit into memory. # + [markdown] id="example-9-flatmap-with-side-inputs-as-dictionaries" # ### Example 9: FlatMap with side inputs as dictionaries # # If a `PCollection` is small enough to fit into memory, then that `PCollection` can be passed as a *dictionary*. # Each element must be a `(key, value)` pair. # Note that all the elements of the `PCollection` must fit into memory for this. 
# If the `PCollection` won't fit into memory, use `beam.pvalue.AsIter(pcollection)` instead. # + id="example-9-flatmap-with-side-inputs-as-dictionaries-code" import apache_beam as beam def replace_duration_if_valid(plant, durations): if plant['duration'] in durations: plant['duration'] = durations[plant['duration']] yield plant with beam.Pipeline() as pipeline: durations = pipeline | 'Durations dict' >> beam.Create([ (0, 'annual'), (1, 'biennial'), (2, 'perennial'), ]) valid_plants = ( pipeline | 'Gardening plants' >> beam.Create([ {'icon': '🍓', 'name': 'Strawberry', 'duration': 2}, {'icon': '🥕', 'name': 'Carrot', 'duration': 1}, {'icon': '🍆', 'name': 'Eggplant', 'duration': 2}, {'icon': '🍅', 'name': 'Tomato', 'duration': 0}, {'icon': '🥔', 'name': 'Potato', 'duration': -1}, ]) | 'Replace duration if valid' >> beam.FlatMap( replace_duration_if_valid, durations=beam.pvalue.AsDict(durations), ) | beam.Map(print) ) # + [markdown] id="example-9-flatmap-with-side-inputs-as-dictionaries-2" # <table align="left" style="margin-right:1em"> # <td> # <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> # View source code # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="related-transforms" # ## Related transforms # # * [Filter](https://beam.apache.org/documentation/transforms/python/elementwise/filter) is useful if the function is just # deciding whether to output an element or not. # * [ParDo](https://beam.apache.org/documentation/transforms/python/elementwise/pardo) is the most general element-wise mapping # operation, and includes other abilities such as multiple output collections and side-inputs. 
# * [Map](https://beam.apache.org/documentation/transforms/python/elementwise/map) behaves the same, but produces exactly one output for each input. # # <table> # <td> # <a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.FlatMap"> # <img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> # Pydoc # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="view-the-docs-bottom" # <table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/flatmap"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table>
examples/notebooks/documentation/transforms/python/element-wise/flatmap-py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import math
import time
import pickle as pkl
import gensim
import shutil
import glob
import os
import smart_open
import time  # NOTE(review): duplicate import of `time` (already imported above)

# Paths to the pickled index structures, the query file and the doc2vec model.
IV_file = '../../data/pickle/inverted_index.pkl'
TAGDICFILE = '../../data/pickle/trainTagsToIndex.pkl'
DOC_LENGTH_FILE = '../../data/pickle/doc_length.pkl'
QUERIES_FILE = '../../data/queries.txt'
DOC2VECMODEL = '../../data/model_embeddings/modelembedding60_40.pkl'

# NOTE(review): these handles are never closed — prefer `with open(...)`.
stopwords = open('../code/stopwords.txt', 'r').readlines()
punctuation = open('../code/punctuation.txt', 'r').readlines()
stopwords = [i.strip() for i in stopwords]
punctuation = [i.strip() for i in punctuation]

f = open(IV_file, 'rb')
inverted_index = pkl.load(f)  # term -> postings list of (doc_id, term_freq) entries
f.close()
f = open(DOC_LENGTH_FILE, 'rb')
documentLenArr = pkl.load(f)  # document length (token count) indexed by doc_id
f.close()
f = open(TAGDICFILE, 'rb')
tagDic = pkl.load(f)  # doc_id -> external document tag (used in the run output)
f.close()

# BM25 parameters and corpus statistics.
numdocuments = len(documentLenArr)
k1 = 1.5
b = 0.5
avgDoclength = sum(documentLenArr)*1.0/numdocuments
maxranking = 50  # number of top-ranked documents kept per query

f = open(DOC2VECMODEL, 'rb')
doc2vecmodel = pkl.load(f)  # trained gensim Doc2Vec model, used to prune candidates
f.close()


# +
def read_corpus_query(fname):
    """Yield one gensim-preprocessed token list per line of `fname`."""
    with smart_open.open(fname, encoding="iso-8859-1") as f:
        for i, line in enumerate(f):
            # print(i,line)
            tokens = gensim.utils.simple_preprocess(line)
            yield tokens


# -

# assuming numdocuments, documentLenArr, k1, b, avgDoclength, maxrank
#usage sort_index(getScoreForQuery(find_count(query)))
def getScoreForQuery(queryMap):
    """Term-at-a-time BM25 scoring.

    `queryMap` maps each query term to its frequency in the query; returns
    a dict {doc_id: accumulated BM25 score} over all matching documents.
    Relies on module-level globals `inverted_index`, `calcIdf`, etc.
    """
    documentToScore = {}
    for term, termfreq in queryMap.items():
        if term in inverted_index:
            posting = inverted_index[term]
            weightOrIdf = calcIdf(len(posting))
            for docInfo in posting:
                doc_id = docInfo[0]
                doc_id_freq = docInfo[1]
                scoreTermDoc = getScoreForTermForDocument(termfreq,doc_id,weightOrIdf,doc_id_freq)
                if doc_id in documentToScore:
                    documentToScore[doc_id] = documentToScore[doc_id] + scoreTermDoc
                else:
                    documentToScore[doc_id] = scoreTermDoc
    return documentToScore


def getScoreForQueryOptimized(queryMap, qindex):
    """Like getScoreForQuery, but only scores documents contained in
    docsets[qindex] — the doc2vec candidate set precomputed for this query.
    NOTE(review): depends on the global `docsets` being built in a later cell.
    """
    documentToScore = {}
    thisdocset = docsets[qindex]
    for term, termfreq in queryMap.items():
        if term in inverted_index:
            posting = inverted_index[term]
            weightOrIdf = calcIdf(len(posting))
            for docInfo in posting:
                doc_id = docInfo[0]
                if doc_id not in thisdocset:
                    continue  # skip documents outside the doc2vec candidate set
                doc_id_freq = docInfo[1]
                scoreTermDoc = getScoreForTermForDocument(termfreq,doc_id,weightOrIdf,doc_id_freq)
                if doc_id in documentToScore:
                    documentToScore[doc_id] = documentToScore[doc_id] + scoreTermDoc
                else:
                    documentToScore[doc_id] = scoreTermDoc
    return documentToScore


def calcIdf(lenposting):
    """BM25 idf: log((N - df + 0.5) / (df + 0.5)) where df = len(posting).

    NOTE(review): this form can go negative when df > N/2 — confirm that is
    acceptable for this corpus.
    """
    ans = math.log((numdocuments*1.0 - lenposting +0.5)/(lenposting+0.5))
    return ans


def getScoreForTermForDocument(qfi,docNo,weight,tfi):
    """BM25 contribution of one term for one document.

    qfi: term frequency in the query; weight: the term's idf;
    tfi: term frequency in the document docNo.
    """
    docLength = documentLenArr[docNo]*1.0
    ans = weight* ((tfi*(k1+1)*1.0)/(tfi+k1*(1.0-b+b*(docLength/avgDoclength))))
    ans = ans*qfi
    return ans


def find_count(text_arr):
    """Count term frequencies in a token list, dropping stopwords/punctuation."""
    dictionary = {}
    for i in text_arr:
        if (i not in stopwords and i not in punctuation):
            if (i in dictionary):
                dictionary[i] += 1
            else:
                dictionary[i] = 1
    return dictionary


def sort_index(documentToScore):
    """Return the top-scoring (doc_id, score) pairs in descending order.

    NOTE(review): the `<=` bound keeps maxranking+1 (=51) results, not 50 —
    likely an off-by-one; confirm the intended run depth.
    NOTE(review): destructively consumes the input dict (entries are deleted).
    """
    ans = []
    numvaluesAdded=0
    while(numvaluesAdded<=maxranking and len(documentToScore)!=0):
        tempkey = max(documentToScore, key=documentToScore.get)
        ans.append((tempkey, documentToScore[tempkey]))
        numvaluesAdded +=1
        del documentToScore[tempkey]
    return ans


def tagToScore(documentToScore):
    """Map each (doc_id, score) pair to (document tag, score) via tagDic."""
    ans = []
    for doc_id, score in documentToScore:
        ans.append((tagDic[doc_id], score))
    return ans;


def writeOutput(answers, filename):
    """Write rankings in TREC run format: `<qid> 0 <tag> <rank> <score> p`.

    Query ids start at 51 (i+51) — presumably matching the evaluation topic
    numbering; verify against the qrels file.
    """
    ansstr = ""
    for i in range(len(answers)):
        thisanswer = answers[i]
        doctagToScore = tagToScore(thisanswer)
        for j in range(len(doctagToScore)):
            tempstr = str(i+51) + " 0 "+ doctagToScore[j][0] + " " + str(j+1)+" "+ str(doctagToScore[j][1])+ " p\n"
            # print(tempstr)
            ansstr += tempstr
    outfile = open(filename, "w")
    outfile.write(ansstr)
    outfile.close()


# NOTE(review): `answersall` is defined in a later cell — this cell only runs
# cleanly if the notebook was executed out of order.
writeOutput(answersall, "../../data/answers.txt")
writeOutput(answersallOptimized, "../../data/answersOptimized.txt") # %%time answers0 = sort_index(getScoreForQuery(find_count(queries[0].split(' ')))) # + # %%time answers0opti = sort_index(getScoreForQueryOptimized(find_count(queries[0].split(' ')),0)) # - answers0 answers0opti # %%time starttime = time.time() queries = open(QUERIES_FILE, 'r').readlines() answersall = [] for x in range(len(queries)): answersall.append(sort_index(getScoreForQuery(find_count(queries[x].split(' '))))) endtime = time.time() print(endtime-starttime) # + def minnextDoc(leastdoc): mindoc = numdocuments minkey = None for key,value in nextDocDic.items(): # if key not in inverted_index: # continue keyInv = inverted_index[key] if(value+1<=len(keyInv)-1): thisdoc = inverted_index[key][value+1][0] if(thisdoc>leastdoc and thisdoc<mindoc): mindoc = thisdoc minkey = key if(minkey!=None): nextDocDic[minkey] +=1 return mindoc else: return numdocuments def minnextDocOptimized(leastdoc, qindex): mindoc = numdocuments minkey = None for key,value in nextDocDic.items(): # if key not in inverted_index: # continue keyInv = inverted_index[key] if(value+1<=len(keyInv)-1): thisdoc = inverted_index[key][value+1][0] if(thisdoc>leastdoc and thisdoc<mindoc): if(thisdoc in docsets[qindex]): mindoc = thisdoc minkey = key if(minkey!=None): nextDocDic[minkey] +=1 return mindoc else: return numdocuments def getDAATScore(docno): documentToScore = {} docScore = 0 for term, termfreq in queryMap.items(): if term in inverted_index: posting = inverted_index[term] weightOrIdf = calcIdf(len(posting)) docNolist = posting[:,0] findex = np.searchsorted(docNolist, docno) # docInfo = next((x for x in posting if x[0]==docno), None) # print(docInfo) # print(type(docInfo)) # if docInfo is not None: if (findex!=len(posting)): docInfo = posting[findex] doc_id = docInfo[0] doc_id_freq = docInfo[1] scoreTermDoc = getScoreForTermForDocument(termfreq,doc_id,weightOrIdf,doc_id_freq) docScore += scoreTermDoc return docScore # - # %%time 
# Document-at-a-time driver over the first two queries only (smoke test).
answersall = []
queries = open(QUERIES_FILE, 'r').readlines()[0:2]
for x in range(2):
    print("query x", x)
    results={}
    r=0  # NOTE(review): unused
    queryMap = find_count(queries[x].split(' '))
    # One cursor per query term present in the index; -1 means "before first posting".
    nextDocDic = {}
    for key in queryMap:
        if key in inverted_index:
            nextDocDic[key] = -1
    d = minnextDoc(-1)
    while (d!=numdocuments):
        # print("document", d)
        scoreford = getDAATScore(d)
        # results.append((d,scoreford))
        results[d] = scoreford
        d = minnextDoc(d)
    # results = sort_index(results)
    answersall.append(sort_index(results))

# %%time
# Build the doc2vec candidate set (top 80000 most similar docs) per query.
starttime = time.time()
queries = open(QUERIES_FILE, 'r').readlines()
test_corpus = list(read_corpus_query(QUERIES_FILE))
docsets = []
for doc_id in range(len(test_corpus)):
    inferred_vector = doc2vecmodel.infer_vector(test_corpus[doc_id])
    sims = doc2vecmodel.docvecs.most_similar([inferred_vector], topn=80000)
    # print(sims[:])
    docsets.append(set([x for x,_ in sims]))

# %%time
# DAAT driver restricted to each query's doc2vec candidate set.
answersallOptimized = []
queries = open(QUERIES_FILE, 'r').readlines()
for x in range(len(queries)):
    print("query x", x)
    results={}
    r=0  # NOTE(review): unused
    queryMap = find_count(queries[x].split(' '))
    nextDocDic = {}
    for key in queryMap:
        if key in inverted_index:
            nextDocDic[key] = -1
    d = minnextDocOptimized(-1,x)
    while (d!=numdocuments):
        # print("document", d)
        scoreford = getDAATScore(d)
        # results.append((d,scoreford))
        results[d] = scoreford
        d = minnextDocOptimized(d,x)
    # results = sort_index(results)
    answersallOptimized.append(sort_index(results))

writeOutput(answersall, "../../data/answersdaat.txt")

writeOutput(answersallOptimized, "../../data/answersOptimizeddaat.txt")

answersall

answersallOptimized

# NOTE(review): sort_index consumed `results` above, so this returns [] here.
sort_index(results)

tagToScore(answers0)

# %%time
# Rebuild the doc2vec candidate sets with a tighter cut-off (top 10000).
starttime = time.time()
queries = open(QUERIES_FILE, 'r').readlines()
test_corpus = list(read_corpus_query(QUERIES_FILE))
docsets = []
for doc_id in range(len(test_corpus)):
    inferred_vector = doc2vecmodel.infer_vector(test_corpus[doc_id])
    sims = doc2vecmodel.docvecs.most_similar([inferred_vector], topn=10000)
    # print(sims[:])
    docsets.append(set([x for x,_ in sims]))
# Re-rank every query with the doc2vec-restricted BM25 scorer and time the run
# (`starttime` was set in the previous cell, before the docsets rebuild).
answersallOptimized = [
    sort_index(getScoreForQueryOptimized(find_count(query_text.split(' ')), query_idx))
    for query_idx, query_text in enumerate(queries)
]
endtime = time.time()
print(endtime-starttime)

# Inspect the (tag, score) pairs for the first query's ranking.
tagToScore(answersallOptimized[0])

# Average document length used by the BM25 length normalisation.
avgDoclength

# Scratch comparisons against a previously computed similarity set
# (`allsims` is not defined anywhere in this notebook, so these cells
# raise NameError unless it was created in an earlier session).
26981 in allsims[0]

[(ranked_pair[0] in allsims[0]) for ranked_pair in answers0]
Jupyter_notebooks/bm25-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spindles detection on specific sleep stages # # This notebook demonstrates how to apply **spindles detection on specific sleep stages** by providing an hypnogram (= sleep stage) to YASA. # # Please make sure to install the latest version of YASA first by typing the following line in your terminal or command prompt: # # `pip install --upgrade yasa` # # # ## 1. Loading the EEG data and the hypnogram # # We first load a full-night 3-channels dataset (Cz, Fz, Pz) sampled at 100 Hz. The data is in compressed NumPy format (*.npz*). # + import yasa import numpy as np import pandas as pd import seaborn as sns # Load data f = np.load('data_full_6hrs_100Hz_Cz+Fz+Pz.npz') data, ch_names = f['data'], f['chan'] sf = 100. times = np.arange(data.size) / sf print(data.shape, ch_names) print(np.round(data[:, 0:5], 3)) # - # Next, we load the sleep staging vector (a.k.a hypnogram), which is a simple text file in which each value typically represents 30 seconds of data. Sleep stages are encoded as integer (*0: Wake, 1: N1 sleep, 2: N2 sleep, 3: N3 sleep, 4: REM sleep*). In the code below, we load our 30-sec hypnogram and upsample it to match the sampling frequency and length of data, using YASA's built-in [hypno_upsample_to_data](https://raphaelvallat.com/yasa/build/html/generated/yasa.hypno_upsample_to_data.html#yasa.hypno_upsample_to_data) function. Please refer to [08_bandpower.ipynb](08_bandpower.ipynb) for more details on how to manipulate hypnogram in YASA. 
# Load the 30-second hypnogram and upsample it to one stage label per EEG sample.
hypno_30s = np.loadtxt('data_full_6hrs_100Hz_hypno_30s.txt')
hypno = yasa.hypno_upsample_to_data(hypno=hypno_30s, sf_hypno=(1/30), data=data, sf_data=sf)
print(hypno.shape, 'Unique values =', np.unique(hypno))

# To have a quick overview of our data, we can use [yasa.plot_spectrogram](https://raphaelvallat.com/yasa/build/html/generated/yasa.plot_spectrogram.html) function. Please check out [10_spectrogram.ipynb](10_spectrogram.ipynb) for a walkthrough of this function.

# We use data[0, :] to select only the first channel, which in this case is Cz
fig = yasa.plot_spectrogram(data[0, :], sf, hypno)

# The top panel shows the full-night hypnogram, and the bottom panel show the time-frequency representation of the full-night recording, with warmer colors indicating greater power in the frequency range defined on the left y-axis. Notice how we can definitely see some spindles-related activity around 12-15 Hz during N2 and N3 sleep.

# ## 2. Apply the sleep spindles detection
#
# To apply the multi-channel detection, we use the [yasa.spindles_detect](https://raphaelvallat.com/yasa/build/html/generated/yasa.spindles_detect.html) function. We also pass the hypnogram to restrain the detection to specific sleep stages, which are defined as integers in the ``include`` argument. Below, we're limiting the detection to NREM sleep, i.e. N1 (=1), N2 (=2) and N3 (=3) sleep.

# +
# Detect spindles only during NREM sleep (stages 1, 2 and 3 of the hypnogram).
sp = yasa.spindles_detect(data, sf, ch_names=ch_names, hypno=hypno, include=(1, 2, 3))

# Display the full detection dataframe
sp.summary().round(3)
# -

# Using the [summary](https://raphaelvallat.com/yasa/build/html/generated/yasa.SpindlesResults.html#yasa.SpindlesResults.summary) method, we can easily get the spindles parameters, averaged across channel and sleep stage. The ``Density`` column is the number of spindles per minutes of each stage. For example, in the first row (Stage = 1, Channel = Cz), a density of 2.0 means that there are 2 spindles per minutes of N1 sleep detected on Cz.
sp.summary(grp_chan=True, grp_stage=True, aggfunc='mean') # Plot an average template per channel sp.plot_average(ci=None, palette="Set1"); # Plot an average template per stage, using a custom color palette sp.plot_average(hue="Stage", ci=None, palette=['tab:grey', 'tab:blue', '#114061']); # **Detection on N2 and N3 sleep only (exclude N1)** # + sp = yasa.spindles_detect(data, sf, ch_names=ch_names, hypno=hypno, include=(2, 3)) # Now we're only grouping (= stratifying) by stage but not by channel # We're also using the median instead of the mean to calculate the average spindles features sp.summary(grp_chan=False, grp_stage=True, aggfunc='median').round(3) # - # **Detection on the whole recording** # # If we finally decide to apply the detection on the full recording, i.e. not limiting the detection to any sleep stages, we can simply remove the ``hypno=hypno`` argument. # + sp = yasa.spindles_detect(data, sf, ch_names=ch_names) # Here we can only average across channels since sleep stage are unknown. sp.summary(grp_chan=True).round(3) # - # Visual inspection of the detected spindles # %matplotlib widget sp.plot_detection();
notebooks/03_spindles_detection_NREM_only.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 特征集 # **学习目标**:创建一个包含更少特征但是效果和更复杂的特征集一样出色的集合。 # ### 设置 # 加载加州住房数据集 # + from __future__ import print_function import math from IPython import display from matplotlib import cm from matplotlib import gridspec from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn import metrics import tensorflow as tf from tensorflow.python.data import Dataset tf.logging.set_verbosity(tf.logging.ERROR) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format california_housing_df = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv", sep=',') california_housing_df = california_housing_df.reindex(np.random.permutation(california_housing_df.index)) # + # 预处理特征 def preprocess_features(california_housing_df): """预处理房价的DataFrame,准备输入特征,添加人为特征 Args: california_housing_df: 包含加州房价数据的df Returns: 包含处理后特征的DataFrame """ selected_features = california_housing_df[["latitude", "longitude", "housing_median_age", "total_rooms", "total_bedrooms", "population", "households", "median_income"]] processed_features = selected_features.copy() # 创建额外的特征 processed_features["rooms_per_person"] = (california_housing_df["total_rooms"] / california_housing_df["population"]) return processed_features # 预处理目标 def preprocess_targets(california_housing_df): """从加州房价DataFrame准备目标特征,即标签 Args: california_housing_dataframe: 包含加州房价数据的df Returns: 包含目标标签的df """ output_targets = pd.DataFrame() # 将目标标签的值缩放 output_targets["median_house_value"] = (california_housing_df["median_house_value"] / 1000.0) return output_targets # + # 选择前12000/17000用于训练 training_examples = preprocess_features(california_housing_df.head(12000)) training_targets = preprocess_targets(california_housing_df.head(12000)) # 选择最后的5000用于验证 
validation_examples = preprocess_features(california_housing_df.tail(5000))
validation_targets = preprocess_targets(california_housing_df.tail(5000))

print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())

print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
# -

# ### Task 1. Build a good feature set
# How good can we do with only a small number of features (e.g. 2-3)?
#
# A **correlation matrix** shows pairwise correlations. Correlation here is the
# Pearson correlation coefficient, i.e. the covariance of two variables divided
# by the product of their standard deviations.
#
# Correlation values lie in \[-1, +1\], with the following meaning:
#
# |fully negative|uncorrelated|fully positive|
# |:---:|:---:|:---:|
# |-1.0|0|+1.0|

correlation_mx_df = training_examples.copy()
correlation_mx_df["target"] = training_targets["median_house_value"]
correlation_mx_df.corr()


# Generally we want:
# - features strongly correlated with the target;
# - features that are not too strongly correlated with each other;
# - we can also build synthetic features.

def my_input_fn(features, targets, batch_size=1,shuffle=True, num_epochs=None):
    """Input function feeding batches of (features, labels) to the estimator.

    Args:
      features: DataFrame of features.
      targets: DataFrame of targets.
      batch_size: batch size passed to the model.
      shuffle: whether to shuffle the data.
      num_epochs: number of epochs the data is repeated for.
    Returns:
      Tuple (features, labels) for the next batch of data.
    """
    # Convert the DataFrame into a dict of numpy arrays.
    features = {key:np.array(value) for key,value in dict(features).items()}

    # Build the dataset.
    ds = Dataset.from_tensor_slices((features, targets))
    ds = ds.batch(batch_size).repeat(num_epochs)

    # Shuffle the data.
    if shuffle:
        ds = ds.shuffle(10000)

    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels


def construct_feature_columns(input_features):
    """Construct the feature columns.

    Args:
      input_features: names of the numerical features.
    Returns:
      Set of feature columns.
    """
    return set([tf.feature_column.numeric_column(my_feature)
                for my_feature in input_features])


def train_model(learning_rate,
                steps,
                batch_size,
                training_examples,
                training_targets,
                validation_examples,
                validation_targets):
    """Train a linear regression model on multiple features.

    Trains for `steps` steps split over 10 reporting periods, printing the
    training RMSE after each period and plotting train/validation RMSE curves.
    """
    periods = 10
    steps_per_period = steps / periods

    # Define the optimizer (gradient clipping keeps updates stable).
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)

    # Create a linear regressor.
    linear_regressor = tf.estimator.LinearRegressor(feature_columns=construct_feature_columns(training_examples),
                                                    optimizer=my_optimizer)

    # Create the input functions.
    training_input_fn = lambda: my_input_fn(training_examples,training_targets["median_house_value"],
                                            batch_size=batch_size)
    predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                    training_targets["median_house_value"],
                                                    num_epochs=1,
                                                    shuffle=False)
    predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                      validation_targets["median_house_value"],
                                                      num_epochs=1,
                                                      shuffle=False)

    # Train the model, printing the loss after each period.
    print("Start training...")
    print("RMSE (on training data): ")
    training_rmse = []
    validation_rmse = []
    for period in range(0, periods):
        linear_regressor.train(input_fn=training_input_fn,
                               steps=steps_per_period)
        # Compute predictions.
        training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
        training_predictions = np.array([item["predictions"][0] for item in training_predictions])
        validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
        validation_predictions = np.array([item["predictions"][0] for item in validation_predictions])
        # Compute the training and validation losses.
        training_root_mean_squared_error = math.sqrt(metrics.mean_squared_error(training_predictions, training_targets))
        validation_root_mean_squared_error = math.sqrt(metrics.mean_squared_error(validation_predictions, validation_targets))
        # Print the results.
        print("period %02d : %.2f" % (period, training_root_mean_squared_error))
        training_rmse.append(training_root_mean_squared_error)
        validation_rmse.append(validation_root_mean_squared_error)
    print("Model training finished!")

    # Plot the loss over the periods.
    plt.ylabel("RMSE")
    plt.xlabel("Periods")
    plt.title("Root Mean Squared Error via Periods")
    plt.tight_layout()
    plt.plot(training_rmse, label="training")
    # NOTE(review): legend label is misspelled ("validaiton") — it is a runtime
    # string, left unchanged here; fix alongside a behavior change if desired.
    plt.plot(validation_rmse, label="validaiton")
    plt.legend()

    return linear_regressor


# +
# Train the model
minimal_features = ["median_income", "latitude"]
minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]

_ = train_model(learning_rate=0.001,
                steps=500,
                batch_size=5,
                training_examples=minimal_training_examples,
                training_targets=training_targets,
                validation_examples=minimal_validation_examples,
                validation_targets=validation_targets)
# -

# ### Task 2. Make better use of latitude
# Plotting latitude against median_house_value shows there is indeed no linear
# relationship between the two — but the peaks in the plot correspond to the
# locations of Los Angeles and San Francisco.

plt.scatter(training_examples["latitude"], training_targets["median_house_value"])


# **Try to create synthetic features that make better use of latitude**
# - Map the latitude feature to the distance to San Francisco: |latitude-38|
# - Bucketize latitude into 10 buckets

# +
# process 1
processed_training_examples1 = training_examples.copy()
processed_validation_examples1 = validation_examples.copy()
# NOTE(review): column name "distance_to_san_fransico" is misspelled but used
# consistently (it is a runtime string) — left unchanged.
processed_training_examples1["distance_to_san_fransico"] = (processed_training_examples1["latitude"] - 38).abs()
processed_validation_examples1["distance_to_san_fransico"] = (processed_validation_examples1["latitude"] - 38).abs()


# process 2
def process_latitude_feature(examples_df):
    """Add one boolean bucket column per 1-degree latitude band [i, i+1)
    for i in 32..41 (half-open on the left: i < latitude <= i+1).

    NOTE(review): `np.int` is deprecated since NumPy 1.20 — use plain `int`.
    NOTE(review): columns are boolean; confirm downstream numeric_column
    handling treats them as 0/1 as intended.
    """
    for i in np.arange(32, 42, 1, dtype=np.int):
        feature_name = "latitude_%d_to_%d" % (i, i+1)
        examples_df[feature_name] = (examples_df["latitude"]).apply(
            lambda val: i<val<=(i+1))
    return examples_df


processed_training_examples2 = training_examples.copy()
processed_validation_examples2 = validation_examples.copy()
processed_training_examples2 = process_latitude_feature(processed_training_examples2)
processed_validation_examples2 = process_latitude_feature(processed_validation_examples2)
display.display(processed_training_examples2)
display.display(processed_validation_examples2)

# +
# Use the features from process 1
kl_features1 = ["distance_to_san_fransico", "median_income"]
kl_training_examples1 = processed_training_examples1[kl_features1]
kl_validation_examples1 = processed_validation_examples1[kl_features1]

_ = train_model(learning_rate=0.01,
                steps=2000,
                batch_size=5,
                training_examples=kl_training_examples1,
                training_targets=training_targets,
                validation_examples=kl_validation_examples1,
                validation_targets=validation_targets)

# Use the features from process 2
kl_features2 = ["median_income"]
for i in np.arange(32, 42, 1, dtype=np.int):
    feature_name = "latitude_%d_to_%d" % (i, i+1)
    kl_features2.append(feature_name)
kl_training_examples2 = processed_training_examples2[kl_features2]
kl_validation_examples2 = processed_validation_examples2[kl_features2]

_ = train_model(learning_rate=0.01,
                steps=2000,
                batch_size=5,
                training_examples=kl_training_examples2,
                training_targets=training_targets,
                validation_examples=kl_validation_examples2,
                validation_targets=validation_targets)
code/feature_sets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'melusine_new
#
# '
#     language: python
#     name: melusine_new
# ---

# # Unsupervised Semantic Analysis tutorial
# The **SemanticDetector** class is used to predict a sentiment score in a document / email.
#
# For that purpose, two inputs are required:
# - a list of seed words that characterize a sentiment
#   Example: the seed words ["mad", "furious", "insane"] characterize the sentiment "dissatisfaction"
# - a trained embedding (Melusine **Embedding** class instance) to compute distances between words/tokens
#
# The three steps for sentiment score prediction are the following:
# - Instantiate a SentimentDetector object with a list of seed words as argument
# - Use the SentimentDetector.fit method (with an embedding object as argument) to compute the lexicons
# - Use the SentimentDetector.predict method on a document/email DataFrame to predict the sentiment score

# ## Minimal working example

# +
import pandas as pd
import numpy as np

# NLP tools
from melusine.nlp_tools.embedding import Embedding
from melusine.nlp_tools.tokenizer import Tokenizer

# Models
from melusine.models.modeler_semantic import SemanticDetector
# -

# ### Load email data

df_emails_clean = pd.read_csv('./data/emails_preprocessed.csv', encoding='utf-8', sep=';')
df_emails_clean = df_emails_clean[['clean_body']]
df_emails_clean = df_emails_clean.astype(str)

# ### Embedding

# Train an embedding using the text data in the 'clean_body' column
embedding = Embedding(input_column='clean_body', size=300, min_count=2)
embedding.train(df_emails_clean)

# Print a list of words present in the Embedding vocabulary
list(embedding.embedding.vocab.keys())[:3]

# Test the trained embedding : print most similar words
embedding.embedding.most_similar('client', topn=3)

# ### Tokenizer

# Tokenize the text in the clean_body column
tokenizer = Tokenizer(input_column='clean_body', stop_removal=True, n_jobs=20)
df_emails_clean = tokenizer.fit_transform(df_emails_clean)

# Test the tokenizer : print tokens
df_emails_clean['tokens'].head()

# ### Instantiate and fit the Sentiment Detector

# +
seed_word_list = ['immatriculation']

# Instantiate a SentimentDetector object
semantic_detector = SemanticDetector(base_seed_words=seed_word_list, tokens_column='tokens')

# Fit the SentimentDetector using the trained embedding
semantic_detector.fit(embedding=embedding)
# -

print('List of seed words:')
print(semantic_detector.seed_list)

# +
seed_word = semantic_detector.seed_list[0]  # NOTE(review): unused below
lexicon = semantic_detector.lexicon
# Sort the lexicon alphabetically by word for display.
sorted_lexicon = dict(sorted(lexicon.items(), key = lambda x: x[0]))

print(f'(Part of) Lexicon associated with the seed words "{", ".join(semantic_detector.seed_list)}":')
for word, sentiment_score in list(sorted_lexicon.items())[:10]:
    print('  ' + word + ' : ' + str(sentiment_score))
# -

# ### Predict and print the sentiment score
#
# **Warning :** In this example, the embedding is trained on a corpus of 40 emails which is WAY too small to yield valuable results

# +
# Choose the name of the column returned (default is "score")
return_column = "semantic_score"

# Predict the sentiment score on each email of the DataFrame
df_emails_clean = semantic_detector.predict(df_emails_clean, return_column=return_column)

# Print emails with the maximum sentiment score
df_emails_clean.sort_values(by=return_column, ascending=False).head()
# -

# ## The SentimentDetector class
# The SemanticDetector class provides an unsupervised methodology to assign a sentiment score to a corpus of documents/emails. The methodology used to predict a sentiment score using the SemanticDetector is described below:
#
# 1.
**Define a list of seed words that caracterize a sentiment** # - Take a list of seed words as input # - If the `extend_seed_word_list` parameter is set to True: extend the list of seed words with words sharing the same root (dance -> ["dancing", "dancer"]) # # # 2. **Fit the model (= create a lexicon to assign a score for every word in the vocabulary)** # - Create a lexicon for each seed word by computing the cosine similarity between the seed word and all the words in the vocabulary is computed. # - Aggregate the similarity score obtained for the different seed words in a unique lexicon # - (To compute cosine similarities, a trained embedding is required.) # # 3. **Predict a sentiment score for emails/documents** # - Filter out the tokens in the document that are not in the vocabulary. # - For each remaining token, compute its sentiment score using the lexicon. # - For each email, aggregate the score accross different tokens # The arguments of a SemanticDetector object are : # # - **base_seed_words :** the list of seed words that caracterize a sentiment/theme # - **base_anti_seed_words :** the list of seed words that caracterize undesired sentiments/themes # - **anti_weight :** the weight of anti_seeds in the computation of the semantic score # - **tokens_column :** name of the column in the input DataFrame that contains tokens # - **extend_seed_word_list :** if True: complement seed words with words sharing the same root (dance -> ["dancing", "dancer"]). Default value False. # - **normalize_scores :** if True: normalize the lexicon scores of eache word. Default value False. # - **aggregation_function_seed_wise :** Function to aggregate the scores associated with a token accross the different seeds. Default function is a max. # - **aggregation_function_email_wise :** Function to aggregate the scores associated with the different tokens in an email. Default function is the 60th percentile. # - **n_jobs :** the number of cores used for computation. Default value, 1. 
# ## Filter out undesired themes with using "anti seed words" and "anti_ratio" # # If you want to detect emergency in your emails, you could use the seed word `"emergency"`. # * "I need an answer, this is an emergency !!" => Semantic score = 0.98 # # But you might detect undesired sentences such as: # * "Yesterday I tested the emergency brake of my car" => Semantic score = 0.95 # # You can prevent the detection of undesired themes using anti seed words: # * `base_anti_seed_word_list = ['brake']` # * "Yesterday I tested the emergency brake of my car" => Semantic score = 0.50 # # You can control the contribution of anti seed words using the `anti_weight` (default 0.3): # * `base_anti_seed_word_list = ['brake']` # * `anti_weight = 0.6` # * "Yesterday I tested the emergency brake of my car" => Semantic score = 0.30 # # The formula used to compute the semantic score is: # * semantic score = seed_word_contrib - anti_weight * anti_seed_word_contrib # # Warning : an `anti_weight` above one means anti seeds contribute more (negatively) than regular seeds # + seed_word_list = ['immatriculation'] anti_seed_word_list = ['demandes'] # Instanciate SentimentDetector objects regular_semantic_detector = SemanticDetector(base_seed_words=seed_word_list, tokens_column='tokens') semantic_detector_with_anti = SemanticDetector(base_seed_words=seed_word_list, tokens_column='tokens', base_anti_seed_words = anti_seed_word_list) semantic_detector_with_anti2 = SemanticDetector(base_seed_words=seed_word_list, tokens_column='tokens', base_anti_seed_words = anti_seed_word_list, anti_weight=0.5) # Fit the SentimentDetectors using the trained embedding regular_semantic_detector.fit(embedding=embedding) semantic_detector_with_anti.fit(embedding=embedding) semantic_detector_with_anti2.fit(embedding=embedding) # + # Choose the name of the column returned (default is "score") return_column1 = "semantic_score" return_column2 = "semantic_score with anti (anti_weight=0.3)" return_column3 = 
"semantic_score with anti (anti_weight=0.5)" # Predict the sentiment score on each email of the DataFrame df_emails_clean = regular_semantic_detector.predict(df_emails_clean, return_column=return_column1) df_emails_clean = semantic_detector_with_anti.predict(df_emails_clean, return_column=return_column2) df_emails_clean = semantic_detector_with_anti2.predict(df_emails_clean, return_column=return_column3) # Print emails with the maximum sentiment score df_emails_clean.sort_values(by=return_column1, ascending=False).head() # - # ## Find extra seed words with the `extend_seed_word_list` parameter # The SentimentDetector "extend_seed_word_list" parameter activates the search for extra seed words sharing the same root as the base seed words. # # For example, if "dance" is a base seed word, "extend_seed_word_list" will loop through the words in the embedding vocabulary and find new seed words such as "dancer", "dancing". # + # Instanciate a SentimentDetector object semantic_detector_extended_seed = SemanticDetector( base_seed_words=['tel', 'assur'], tokens_column='tokens', extend_seed_word_list=True) # Fit the SentimentDetector using the trained embedding semantic_detector_extended_seed.fit(embedding=embedding) # - # Print the extended list of seed words print(semantic_detector_extended_seed.seed_dict) print(semantic_detector_extended_seed.seed_list) # ## Use a custom function to aggregate lexicon scores # ### Aggregate token score over seeds # The SemanticDetector computes a similarity between a word and every seed words. # An aggretion function is then used to keep a single score for each token. # # Exemple : # - Seed word list : ["horse", "animal"] # - Embedding : simulated # # Lexicon "horse" : # { # "apple" : 0.2, # ... # "hello" : 0.1, # ... # "ponies" : 8.8, # ... # "zebra" : 1.2 # } # Lexicon "animal" : # { # "apple" : 0.1, # ... # "hello" : 0.3, # ... # "ponies" : 4.8, # ... # "zebra" : 6.2 # } # # **Aggregated Lexicon :** # { # "apple" : 0.2, # ... 
# "hello" : 0.3, # ... # "ponies" : 8.8, # ... # "zebra" : 6.2 # } # ### Aggregate semantic score over tokens # When evaluating an email, each word in the email has an associated score. # An aggregation function is used to keep a single score for each email. # # Exemple : # - Sentence : "Hello, I like ponies" # - Seed word list : ["horse", "animal"] # - Embedding : simulated # # **Sentence score :** # - score : score(Hello) + score(I) + score(like) + score(ponies) # - score : 0.3 + 0.1 + 0.2 + 8.8 = 9.4 # # # The semantic score for the email is thus 9.4 # ### Default aggregation functions # # The default aggregation methodology is the following: # - Seed-wise aggregation : For a token, take the max score accross seed # - Exemple : # ponies_score = 8.8 (lexicon "horse") # ponies_score = 4.8 (lexicon "animal") # => Score for the "ponies" token = np.max(8.8, 4.8) = 8.8 # # # - Email-wise aggregation : Given a list of token scores, take the percentile 60 as the sentiment_score for the email # - Exemple : # token_score_list : [0.3 (hello), 0.3 (i), 0.2 (ponies)] # => sentiment_score = np.percentile([0.3, 0.3, 0.2, 8.8], 60) = 0.3 # + # Instanciate a SentimentDetector object with custom aggregation function: # - A mean for the seed-wise aggregation # - A 95th percentile for the email-wise aggregation def aggregation_mean(x): return np.mean(x, axis=0) def aggregation_percentile_95(x): return np.percentile(x, 95) semantic_detector_custom_aggregation = SemanticDetector( base_seed_words=['client'], tokens_column='tokens', aggregation_function_seed_wise=aggregation_mean, aggregation_function_email_wise=aggregation_percentile_95 ) # Fit the SentimentDetector using the trained embedding semantic_detector_custom_aggregation.fit(embedding=embedding) # Predict the sentiment score on each email of the DataFrame df_emails_clean_custom_aggregation = semantic_detector_custom_aggregation.predict(df_emails_clean) # - # ## Multiprocessing # + semantic_detector_multiprocessing = 
SemanticDetector( base_seed_words=['certificat'], tokens_column='tokens', n_jobs = 2 ) # Fit the SentimentDetector using the trained embedding semantic_detector_multiprocessing.fit(embedding=embedding) # Predict the sentiment score on each email of the DataFrame df_emails_multiprocessing = semantic_detector_multiprocessing.predict(df_emails_clean) # -
tutorial/tutorial12_semantic_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pyJHTDB from pyJHTDB.dbinfo import isotropic1024coarse as info import time as tt npoints = 2 nparticles = 2**5 nsteps = 2**7 x = np.zeros(shape = (npoints, nparticles, 3), dtype = np.float32) x[..., 0] = info['lx']*np.random.random(size = (npoints,))[:, None] # this value is adequate for channel flow x[..., 1] = info['ynodes'][info['ynodes'].shape[0]//2] x[..., 2] = info['lz']*np.random.random(size = (npoints,))[:, None] # + from pyJHTDB import libJHTDB from pyJHTDB.dbinfo import interpolation_code lJHTDB = libJHTDB() lJHTDB.initialize() #Add token auth_token = "<PASSWORD>" #Replace with your own token here lJHTDB.add_token(auth_token) t = info['time'][-1] #final time dt = info['time'][1] - info['time'][0] # this may be too big xfull = np.zeros(shape = (nsteps+1, npoints, nparticles, 3), dtype = np.float32) xfull[0] = x kappa = (2*info['nu'])**.5 for tindex in range(nsteps): print('step {0}'.format(tindex)) # get velocity u = lJHTDB.getData( t, xfull[tindex], sinterp = interpolation_code['M2Q8'], tinterp = interpolation_code['NoTInt'], data_set = info['name'], getFunction = 'getVelocity') # <NAME> dW = np.random.randn(*xfull.shape[1:])*(dt**.5) xfull[tindex+1] = xfull[tindex] - u*dt + kappa*dW t -= dt lJHTDB.finalize() # + # %matplotlib nbagg import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(4,4)) ax = fig.add_subplot(111, projection = '3d') for traj in range(xfull.shape[2]): ax.plot(xfull[:, 0, traj, 0], xfull[:, 0, traj, 1], xfull[:, 0, traj, 2], color = 'red') # ax.plot(xfull[:, 1, traj, 0], # xfull[:, 1, traj, 1], # xfull[:, 1, traj, 2], # color = 'blue') # + import numpy as np import ipyvolume as ipv fig = ipv.figure() #ax = fig.add_subplot(111, projection = '3d') 
for traj in range(xfull.shape[2]): p=ipv.pylab.plot(xfull[:, 0, traj, 0], xfull[:, 0, traj, 1], xfull[:, 0, traj, 2], color = 'red') #print(xfull[:, 0, :, 0].min()) ipv.xlim(xfull[:, 0, :, 0].min(),xfull[:, 0, :, 0].max()) ipv.ylim(xfull[:, 0, :, 1].min(),xfull[:, 0, :, 1].max()) ipv.zlim(xfull[:, 0, :, 2].min(),xfull[:, 0, :, 2].max()) ipv.show() # + lJHTDB = libJHTDB() lJHTDB.initialize() #Add token auth_token = "<PASSWORD>" lJHTDB.add_token(auth_token) start = tt.time() nx=128 ny=512 nz=128 result = lJHTDB.getRawData( 9, start = np.array([0, 0, 0], dtype = np.int), size = np.array([nx, ny, nz], dtype = np.int), data_set = 'channel', getFunction = 'Velocity') end = tt.time() print(end - start) lJHTDB.finalize() print(result.shape) # + #fig = ipv.figure() #p=ipv.pylab.volshow(result[:,:,:,0]) #ipv.show() ipv.quickvolshow(result[:,:,:,0], level=[0.2, 0.5, 1.0], opacity=[0.1, 0.1, 0.1]) # + import ipyvolume as ipv skip=20 lengthx=int(np.ceil(nx/skip)) lengthy=int(np.ceil(ny/skip)) lengthz=int(np.ceil(nz/skip)) x=np.linspace(0, 8*np.pi, num=2049) x=x[0:128:skip] y=np.load('/home/idies/workspace/Storage/zwu27/persistent/pyJHTDB-master/pyJHTDB/data/channel_ygrid.npy') y=y[0:512:skip] z=np.linspace(0, 3*np.pi, num=1537) z=z[0:128:skip] print(z) print(lengthy) y,x,z=np.meshgrid(y,x,z) x=np.reshape(x,lengthx*lengthy*lengthz) y=np.reshape(y,lengthx*lengthy*lengthz) z=np.reshape(z,lengthx*lengthy*lengthz) u=np.reshape(result[0:128:skip,0:512:skip,0:128:skip,0],lengthx*lengthy*lengthz) v=np.reshape(result[0:128:skip,0:512:skip,0:128:skip,1],lengthx*lengthy*lengthz) w=np.reshape(result[0:128:skip,0:512:skip,0:128:skip,2],lengthx*lengthy*lengthz) fig = ipv.figure() quiver = ipv.pylab.quiver(x,y,z,u,v,w,size=5**u) ipv.xlim(0, 1.5) ipv.ylim(-1, 1) ipv.zlim(0, 0.78) #ipv.show() from ipywidgets import FloatSlider, ColorPicker, VBox, jslink size = FloatSlider(min=0, max=30, step=0.1) size_selected = FloatSlider(min=0, max=30, step=0.1) color = ColorPicker() color_selected = 
ColorPicker() jslink((quiver, 'size'), (size, 'value')) jslink((quiver, 'size_selected'), (size_selected, 'value')) jslink((quiver, 'color'), (color, 'value')) jslink((quiver, 'color_selected'), (color_selected, 'value')) VBox([ipv.gcc(), size, size_selected, color, color_selected]) # -
examples/SDEs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> # $$ # \newcommand{\set}[1]{\left\{#1\right\}} # \newcommand{\abs}[1]{\left\lvert#1\right\rvert} # \newcommand{\norm}[1]{\left\lVert#1\right\rVert} # \newcommand{\inner}[2]{\left\langle#1,#2\right\rangle} # \newcommand{\bra}[1]{\left\langle#1\right|} # \newcommand{\ket}[1]{\left|#1\right\rangle} # \newcommand{\braket}[2]{\left\langle#1|#2\right\rangle} # \newcommand{\ketbra}[2]{\left|#1\right\rangle\left\langle#2\right|} # \newcommand{\angleset}[1]{\left\langle#1\right\rangle} # $$ # # Linear Operators # # _prepared by <NAME>_
2-math/BlochSphere_Solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mdhasanali3/3d-model-yolov5/blob/main/product_c_3d_mode.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="GFFkGPiWeA9L" outputId="cac4e8b2-f932-45da-9aa4-6c7d3d15acf9" # !git clone https://github.com/ultralytics/yolov5 # clone # %cd yolov5 # %pip install -qr requirements.txt # install import torch from yolov5 import utils from IPython.display import Image, clear_output # to display images display = utils.notebook_init() # checks print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU')) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MogT3avmeGvU" outputId="eca08258-8d54-4e02-bcd7-c09ed5ea2cab" # %cd /content/yolov5 # !pip install roboflow from roboflow import Roboflow rf = Roboflow(api_key="<KEY>") project = rf.workspace("3d-model-realworld-evalution").project("product-c") dataset = project.version(1).download("yolov5") # + colab={"base_uri": "https://localhost:8080/"} id="BTZhBL0AeQe1" outputId="8621061d-8fc2-49a5-e2ba-a197ca9fffe6" # this is the YAML file Roboflow wrote for us that we're loading into this notebook with our data # %cat {dataset.location}/data.yaml # + colab={"base_uri": "https://localhost:8080/"} id="qTwI6_aseqKs" outputId="3bb15a3c-4bc9-4165-a0a3-5faa826cfcda" # !python train.py --img 640 --batch 64 --epochs 110 --data {dataset.location}/data.yaml --weights yolov5s.pt --cache # + colab={"base_uri": "https://localhost:8080/"} id="DS0XvweJe07P" outputId="4c3d1302-95b4-4a18-ed49-36d7b257ff51" # !python detect.py --weights 
runs/train/exp/weights/best.pt --img 416 --conf 0.1 --source {dataset.location}/test/images # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Eikk22jzmzWQ" outputId="f3c5fe0a-f4a8-4416-cb28-92ea84260239" #display inference on ALL test images import glob from IPython.display import Image, display for imageName in glob.glob('/content/yolov5/runs/detect/exp/*.jpg'): #assuming JPG display(Image(filename=imageName)) print("\n") # + colab={"base_uri": "https://localhost:8080/"} id="RHTxPVq5mzZ1" outputId="853ee50a-5685-4dee-d0e0-54fdd94fd9b1" # !python export.py --weights /content/yolov5/runs/train/exp/weights/best.pt --include tfjs # + colab={"base_uri": "https://localhost:8080/"} id="mNmenNm8sy3d" outputId="fa6c58c9-3cf4-4705-a9ca-2e2d16bc47ff" # cd ../.. # + id="fXgt7GpgsyyP" # + colab={"base_uri": "https://localhost:8080/"} id="2MFs6efRmzct" outputId="4899aede-5fad-4549-c87e-15314f652740" # ! git clone https://github.com/mdhasanali3/3d-model-yolov5.git # + id="_qlaBDnVmzfZ" # !git config --global user.email "<EMAIL>" # !git config --global user.name "mdhasanali3" # + colab={"base_uri": "https://localhost:8080/"} id="Lu_zRS4ymzi1" outputId="02348a19-7eb3-4668-e0f6-dd86de41c07d" # !git pull origin # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="HfaU0s2nrA-J" outputId="bd141925-9a7d-4cc4-93ea-90249f7fc1e5" pwd # + colab={"base_uri": "https://localhost:8080/"} id="ffN4QK35rGjn" outputId="7df9aed5-11af-4a7e-9a37-7ee1196a6164" # %cd /content/3d-model-yolov5 # + id="24UUbk_DrG4d" # %mkdir product_C_64b_110e # + id="gdlqNpp4rG_o" # %cp -r /content/yolov5/runs/train/exp/weights/best.pt /content/3d-model-yolov5/product_C_64b_110e # %cp -r /content/yolov5/runs/train/exp/weights/best_web_model /content/3d-model-yolov5/product_C_64b_110e # + colab={"base_uri": "https://localhost:8080/"} id="dNHkFxhTrHDg" outputId="cc28f828-175a-44e4-996e-17a7a4da8dfc" # !git status # + id="as__BK1_rHHC" # !git add -A # + colab={"base_uri": 
"https://localhost:8080/"} id="_z24yIyprHMX" outputId="3eaa4e31-7644-44ad-f522-7611f199a11b" # !git commit -m "product C model" # + colab={"base_uri": "https://localhost:8080/"} id="lUWMkxZFrpkW" outputId="8c862a83-90ea-41ff-a734-1c6e07333058" # !git remote -v # + id="3yfa0y6Cr69b" # !git remote rm origin # + id="2YnjB9Insa_F" # + id="03pQPSrEsc6_" # !git remote add origin https://gud@github.com/mdhasanali3/3d-model-yolov5.git # + colab={"base_uri": "https://localhost:8080/"} id="W2YRbsq6rsDJ" outputId="b93f86d2-603d-4497-b44c-fb98659f6760" # !git push -u origin main # + id="sRRfJwycruoL"
product_c_3d_mode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ロジスティック回帰モデル # Pythonの機械学習用ライブラリ`scikit-learn`を使って,ロジスティック回帰モデルを使って簡単な分類問題にチャレンジしてみましょう. # # --- # ### 0.ライブラリのインポート # + import numpy as np import pandas as pd import sklearn import seaborn as sns import matplotlib import matplotlib.pyplot as plt # %matplotlib inline np.set_printoptions(precision=4) # - print("numpy :", np.__version__) print("pandas :", pd.__version__) print("sklearn :", sklearn.__version__) print("seaborn :", sns.__version__) print("matplotlib :", matplotlib.__version__) # ### 1. データの読込・整形 # `sklearn.datasets`からIrisデータセットを読み込みましょう. # make data samples from sklearn.datasets import load_iris iris = load_iris() # 次に,pandas DataFrame()クラスのインスタンスとして,変数`df_feature`, `df_target`, `df`を定義します. # # 参考: [pandas.DataFrame — pandas 1.0.1 documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) # + df_feature = pd.DataFrame(iris.data, columns=iris.feature_names) df_target = pd.DataFrame(iris.target, columns=["target"]) df_target.loc[df_target['target'] == 0, 'target_name'] = "setosa" df_target.loc[df_target['target'] == 1, 'target_name'] = "versicolor" df_target.loc[df_target['target'] == 2, 'target_name'] = "virginica" df = pd.concat([df_target, df_feature], axis=1) df.head(10) # - # データの要約統計量(サンプル数, 平均, 標準偏差, 四分位数, 中央値, 最小値, 最大値など)をみましょう. df.describe().T # データの共分散行列を描画します.<br> # 対角成分は自分との共分散(相関)を表すため常に1.0となります. df.corr() # seabornを使って,共分散行列を可視化してみましょう. 
# + # Correlation matrix sns.set() cols = ['target', 'sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)'] # プロットしたい特徴量 plt.figure(figsize=(12,10)) plt.title('Pearson Correlation of Iris Features', y=1.01, fontsize=14) sns.heatmap(df[cols].astype(float).corr(), linewidths=0.1, vmax=1.0, cmap=sns.diverging_palette(220, 10, as_cmap=True), square=True, linecolor='white', annot=True) # - # データの散布図行列を描画します.<br> # 相関が大きい説明変数のペアについては, 多重共線性を考えるべきです. # pairplot sns.set() sns.pairplot(df, diag_kind='hist', height=2.0) plt.show() # 分類用のデータセットには,各データに対応するクラスラベルが与えられています.<br> # 上の散布図行列の各点を所属する3つのクラスに応じて色分けしてみましょう. sns.set() sns.pairplot(df, hue='target', diag_kind='hist', height=2.0) plt.show() # ### 2. データの分割 # 変数`iris`から,説明変数と目的変数に相当するデータをそれぞれ取り出し,numpy.ndarray()クラスの変数`X`, `y`へ格納します. X = iris.data y = iris.target # 全データをtrainデータとtestデータに分割します. # すなわち,変数`X`を`X_train`と`X_test`に, # 変数`y`を`y_train`と`y_test`に分けます. # split data by Hold-out-method from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # `print()`で配列の形状を確認してみましょう. print("X_train: ", X_train.shape) print("y_train: ", y_train.shape) print("X_test: ", X_test.shape) print("y_test: ", y_test.shape) # - X_train: 4次元データが120コ格納されている. # - y_train: 1次元データが120コ格納されている. # - X_test: 4次元データが30コ格納されている. # - y_test: 1次元データが30コ格納されている. # ### 3. モデルの作成 # Logistic Regression from sklearn.linear_model import LogisticRegression clf_lr = LogisticRegression(random_state=0, solver='lbfgs', multi_class='auto') # ### 4. 
モデルへデータを適合させる # fit clf_lr.fit(X_train, y_train) # ### モデルの評価 # predictions y_train_pred = clf_lr.predict(X_train) y_test_pred = clf_lr.predict(X_test) # + # Accuracy from sklearn.metrics import accuracy_score print('Accuracy (train) : {:>.4f}'.format(accuracy_score(y_train, y_train_pred))) print('Accuracy (test) : {:>.4f}'.format(accuracy_score(y_test, y_test_pred))) # + # Confusion matrix from sklearn.metrics import confusion_matrix cmat_train = confusion_matrix(y_train, y_train_pred) cmat_test = confusion_matrix(y_test, y_test_pred) # - def print_confusion_matrix(confusion_matrix, class_names, plt_title='Confusion matrix: ', cmap='BuGn', figsize = (6.25, 5), fontsize=10): df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names) fig = plt.figure(figsize=figsize) heatmap = sns.heatmap(df_cm, annot=True, fmt="d", cmap=cmap) heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize) heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize) plt.xlabel('Predicted label') plt.ylabel('True label') plt.title(plt_title, fontsize=fontsize*1.25) plt.show() print_confusion_matrix(cmat_train, iris.target_names, plt_title='Confusion matrix (train, 120 samples)') print_confusion_matrix(cmat_test, iris.target_names, plt_title='Confusion matrix (test, 30 samples)')
docs/ml/iris_LogisticRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PODPAC AWS Support from podpac.managers import aws from podpac import settings # ## AWS Session # # The session is used for authentication and setting the region of services # If no credentials are input, then PODPAC will look in the Settings settings['AWS_ACCESS_KEY_ID'] = 'id' settings['AWS_SECRET_ACCESS_KEY'] = 'key' session = aws.Session() session.get_account_id() # If no credentials are input, and PODPAC settings are not set, then PODPAC looks for shared credentials stored by AWS cli settings['AWS_ACCESS_KEY_ID'] = None settings['AWS_SECRET_ACCESS_KEY'] = None session = aws.Session() session.get_account_id() # # Node Manager # + from podpac.managers import aws from podpac import settings # set logging to DEBUG to see build process import logging logger = logging.getLogger("podpac") logger.setLevel(logging.DEBUG) # - node = aws.Lambda() node # longer description node.describe() # build function and all resources # This can take up to 15-20 seconds if all resources need to be created node.build() # + # example usage import podpac from podpac import clinspace sin_coords_node = podpac.algorithm.SinCoords() coordinates = podpac.Coordinates([clinspace(-90, 90, 180), clinspace(90,-90, 180), '2018-01-01'], ['lat', 'lon', 'time']) node = aws.Lambda(source=sin_coords_node) output = node.eval(coordinates) output.plot() pass # - # print logs logs = node.get_logs() logs # + # remove all resources # node.delete(confirm=True) # - # ### Build with customizations # + settings["FUNCTION_NAME"] = "podpac-lambda-autogen-local" settings["S3_BUCKET_NAME"] = "podpac-test-autogen-10001" settings["FUNCTION_ROLE_NAME"] = "podpac-lambda-autogen-local" settings["AWS_BUDGET_AMOUNT"] = 10.0 # $ settings["AWS_BUDGET_EMAIL"] = "<EMAIL>" node = aws.Lambda() 
node.describe() # - node.build() node.describe() # ### Function utilities node.update_function() # ### Build with restrictions # # Restrict function evaluation to specific node definitions. # + # make node import podpac from podpac import clinspace from podpac.managers import aws from podpac import settings # set logging to DEBUG to see build process import logging logger = logging.getLogger("podpac") logger.setLevel(logging.DEBUG) # + settings["FUNCTION_NAME"] = "podpac-lambda-autogen-restricted" settings["S3_BUCKET_NAME"] = "podpac-test-autogen-10001" settings["FUNCTION_ROLE_NAME"] = "podpac-lambda-autogen-local" # make node that we want to evaluate sin_coords_node = podpac.algorithm.SinCoords() # make lambda node that is restricted only to this node node = aws.Lambda(function_restrict_pipelines=[sin_coords_node.hash]) node.describe() # - node.build() # + # eval at coordinates coordinates = podpac.Coordinates([clinspace(-90, 90, 180), clinspace(90,-90, 180), '2018-01-01'], ['lat', 'lon', 'time']) # this will work node = aws.Lambda(source=sin_coords_node) output = node.eval(coordinates) output.plot() pass # - # this should throw an error sin_coords_node = podpac.algorithm.SinCoords(units="m") node = aws.Lambda(source=sin_coords_node) output = node.eval(coordinates) # # AWS Utilities # ## S3 from podpac.managers import aws session = aws.Session() bucket = "podpac-test-bucket" tags = {"owner": "creare"} aws.create_bucket(session, bucket, bucket_tags=tags) aws.get_bucket(session, bucket) # + # aws.delete_bucket(session, bucket) # this works if no objects are in bucket # - aws.put_object(session, bucket, "test/object.ipynb", "nexrad.ipynb") aws.delete_bucket(session, bucket) # this will fail with object in bucket aws.delete_bucket(session, bucket, delete_objects="True") # this will fail with object in bucket # ## IAM Roles from podpac.managers import aws session = aws.Session() role_name = "podpac-test-role" tags = {"owner": "creare"} role = aws.create_role(session, 
role_name, role_tags=tags) aws.get_role(session, role_name) # + # aws.delete_role(session, role_name) # don't delete here since we will need this create a function # - # ## Lambda Functions from podpac.managers import aws session = aws.Session() function = "podpac-test-function" function_role_arn = role['Arn'] # see IAM Roles above function_handler = "handler.handler" function_source_bucket = "podpac-dist" function_source_key = "1.1.0/podpac_dist.zip" tags = {"owner": "creare"} aws.create_function(session, function, function_role_arn=function_role_arn, function_handler=function_handler, function_source_bucket=function_source_bucket, function_source_key=function_source_key, function_tags=tags) aws.get_function(session, function) aws.delete_function(session, "podpac-test-function") # create from local zip file function = "podpac-test-function-local" aws.create_function(session, function, function_role_arn=function_role_arn, function_handler=function_handler, function_zip_file="podpac_dist.zip", function_tags=tags) aws.delete_function(session, function) # ## API Gateway from podpac.managers import aws session = aws.Session() api_name = "podpac-lambda-autogen-api" api_endpoint = "eval" aws.get_api(session, api_name, api_endpoint) apigateway = session.client("apigateway") response = apigateway.get_stages(restApiId="fs3mqqombi") apigateway.get_resources(restApiId="fs3mqqmbi") # ## Budget Resources from podpac.managers import aws session = aws.Session() budget_amount = 100 # USD budget_email = "<EMAIL>" budget_name = "my-overall-podpac-budget" aws.create_budget(session, budget_amount, budget_email, budget_name=budget_name) # ## Get Logs from podpac.managers import aws session = aws.Session() log_group_name = "/aws/lambda/podpac-test-function" aws.get_logs(session, log_group_name)
notebooks/scratch/test-lambda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} from ner.utils.ner import extract_entities from ner.utils.process_data import normalize_text # - from vncorenlp import VnCoreNLP annotator = VnCoreNLP(address="http://1172.16.17.32", port=9000) # + import torch import numpy as np from ner.utils.ner import extract_entities from ner.utils.process_data import normalize_text from transformers import AutoModel, AutoTokenizer, AutoModelForTokenClassification # load phobert-ner đã được fine-tuned phobert_ner = AutoModelForTokenClassification.from_pretrained("/home/phamvanhanh/PycharmProjects/DataScience_Project/PredictRentedApartment/ner/checkpoint-2750") # load tokenizer của phobert tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base", use_fast=False) # + pycharm={"name": "#%%\n"} text = """chính chủ cho thuê căn hộ tập thể 8/3 nhà a6, tầng 1 đầu hồi, phố 8/3. diện tích 40m2. phòng khách (01), phòng ngủ (01). ngõ rộng sát đường 8/3. nhà vệ sinh, bếp, sân ngoài trời riêng biệt, sinh hoạt, để xe máy…. gần sát chợ 8/3, bệnh viện thanh nhàn, trường mẫu giáo, trường học... phù hợp cho hộ gia đình. an ninh tốt. hàng xóm thân thiện giá: 5 triệu/tháng tiền điện, nước giá nhà nước theo tiêu chuẩn hộ gia đình. 
đ/c: nhà a6, phố 8/3, phường quỳnh mai, quận hai bà trưng, hà nội.""" # + pycharm={"name": "#%%\n"} cleaned_text = normalize_text(raw_description=text, annotator=annotator) # + pycharm={"name": "#%%\n"} print(cleaned_text) # + pycharm={"name": "#%%\n"} results = extract_entities(phobert_ner, tokenizer, cleaned_text) results # + pycharm={"name": "#%%\n"} columns = ['LOCATION', 'PHONE_NUMBER', 'NEW_TYPE', 'BEDROOM_NUMBER', 'BATH_ROOM_NUMBER', 'LIVING_ROOM', 'AREA', 'APARTMENT_TYPE', 'PROJECT', 'INVESTOR', 'FLOOR', 'FURNITURE_TYPE', 'FURNITURE', 'CONVENIENT', 'PRICE', 'URL', 'RAW_DESCRIPTION']
ner/phobert-ner prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# One-hot encoding vs. dummy variables: predict house prices from town + area,
# first with pandas.get_dummies, then with sklearn's LabelEncoder/OneHotEncoder.

import pandas as pd
from sklearn.linear_model import LinearRegression

df = pd.read_csv('Homeprices.csv')
df

# --- Approach 1: pandas dummy variables -------------------------------------
dummies = pd.get_dummies(df.town)
dummies

merged = pd.concat([df, dummies], axis='columns')
merged

# Drop one dummy level ('west windsor') so the remaining columns are not
# perfectly collinear, plus the original text column.
final = merged.drop(['town', 'west windsor'], axis='columns')
final

model = LinearRegression()
x = final.drop('price', axis='columns')
x
y = final.price
y

model.fit(x, y)
model.predict([[3400, 0, 0]])
model.score(x, y)

# --- Approach 2: sklearn LabelEncoder + OneHotEncoder ------------------------
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
# Work on a copy: the original `dfle = df` only aliased the frame, so the
# in-place label encoding below silently overwrote df.town as well.
dfle = df.copy()
dfle.town = le.fit_transform(dfle.town)
dfle

X = dfle[['town', 'area']]
X
y = dfle.price.values
y

from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer

# One-hot encode column 0 (the label-encoded town), pass area through.
ct = ColumnTransformer([('town', OneHotEncoder(), [0])],
                       remainder='passthrough')
X = ct.fit_transform(X)
X

# Drop the first one-hot column (same collinearity reasoning as above).
X = X[:, 1:]
model.fit(X, y)
model.predict([[0, 1, 3400]])
OneHotEncoding &Dummy Variable/OneHotEncoding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# [**Blueprints for Text Analysis Using Python**](https://github.com/blueprints-for-text-analytics-python/blueprints-text)
# <NAME>, <NAME>, <NAME>
#
# **If you like the book or the code examples here, please leave a friendly comment on [Amazon.com](https://www.amazon.com/Blueprints-Text-Analytics-Using-Python/dp/149207408X)!**
# <img src="../rating.png" width="100"/>

# # Chapter 10:<div class='tocSkip'/>
# # Exploring Semantic Relationships with Word Embeddings

# ## Remark<div class='tocSkip'/>
#
# The code in this notebook differs slightly from the printed book. For example we
# frequently use pretty print (`pp.pprint`) instead of `print` and `tqdm`'s
# `progress_apply` instead of Pandas' `apply`.
#
# Moreover, several layout and formatting commands, like `figsize` to control
# figure size or subplot commands are removed in the book.
#
# You may also find some lines marked with three hashes ###. Those are not in the
# book as well as they don't contribute to the concept.
#
# All of this is done to simplify the code in the book and put the focus on the
# important parts instead of formatting.

# ## Setup<div class='tocSkip'/>
#
# Set directory locations. If working on Google Colab: copy files and install
# required libraries.

# +
import sys
import os

# True only when running inside Google Colab (the 'google.colab' package is
# importable nowhere else).
ON_COLAB = 'google.colab' in sys.modules

if ON_COLAB:
    GIT_ROOT = 'https://github.com/blueprints-for-text-analytics-python/blueprints-text/raw/master'
    # Fixed, constant URL, so os.system is acceptable for a notebook bootstrap.
    os.system(f'wget {GIT_ROOT}/ch10/setup.py')
    # %run -i setup.py
# -

# ## Load Python Settings<div class="tocSkip"/>
#
# Common imports, defaults for formatting in Matplotlib, Pandas etc.
# +
# %run "$BASE_DIR/settings.py"

# %reload_ext autoreload
# %autoreload 2

# %config InlineBackend.figure_format = 'png'

# set precision for similarity values
# %precision 3

np.set_printoptions(suppress=True)  # no scientific notation for small numbers

# path to import blueprints packages
sys.path.append(BASE_DIR + '/packages')
# -

# ## What you will learn and what we will build

# # The Case for Semantic Embeddings
# ## Word Embeddings
#
# ## Analogy Reasoning with Word Embeddings
#
# ## Types of Embeddings
#
# ### Word2Vec
#
# ### GloVe
# ### FastText
# ### Deep Contextualized Embeddings

# # Blueprint: Similarity Queries on Pre-Trained Models
# ## Loading a Pretrained Model

import os
# make gensim cache downloaded models next to this notebook
os.environ['GENSIM_DATA_DIR'] = './models'

# pandas number format
pd.options.display.float_format = '{:.0f}'.format

# +
import gensim.downloader as api

info_df = pd.DataFrame.from_dict(api.info()['models'], orient='index')
info_df[['file_size', 'base_dataset', 'parameters']].head(5)
# -

# full list of columns
info_df.head(3)

pd.options.display.float_format = '{:.2f}'.format

model = api.load("glove-wiki-gigaword-50")

# ## Similarity Queries

# %precision 2

# +
v_king = model['king']
v_queen = model['queen']

print("Vector size:", model.vector_size)
print("v_king =", v_king[:10])
print("v_queen =", v_queen[:10])
print("similarity:", model.similarity('king', 'queen'))
# -

# %precision 3
model.most_similar('king', topn=3)

# +
v_lion = model['lion']
v_nano = model['nanotechnology']

model.cosine_similarities(v_king, [v_queen, v_lion, v_nano])
# -

model.most_similar(positive=['woman', 'king'], negative=['man'], topn=3)

model.most_similar(positive=['paris', 'germany'], negative=['france'], topn=3)

model.most_similar(positive=['france', 'capital'], topn=1)

model.most_similar(positive=['greece', 'capital'], topn=3)

# # Blueprints for Training and Evaluation of Your Own Embeddings
#
# ## Data Preparation

# +
db_name = "reddit-selfposts.db"
db_name = f"{BASE_DIR}/data/reddit-selfposts/reddit-selfposts-ch10.db"  ### real location
con = sqlite3.connect(db_name)
df = pd.read_sql("select subreddit, lemmas, text from posts_nlp", con)
con.close()

df['lemmas'] = df['lemmas'].str.lower().str.split()  # lower case tokens
sents = df['lemmas']  # our training "sentences"
# -

# ### Phrases

# +
from gensim.models.phrases import Phrases, npmi_scorer
import gensim

# Gensim 4.x expects a string delimiter for compound tokens, 3.x a bytes one.
# Parse the major version numerically: the original string comparison
# (gensim.__version__[0] > '3') breaks for any future version >= 10.
GENSIM_4_PLUS = int(gensim.__version__.split('.', 1)[0]) > 3
if GENSIM_4_PLUS:
    delim = '-'   # gensim 4.x - string delimiter
else:
    delim = b'-'  # gensim 3.x - byte delimiter

phrases = Phrases(sents, min_count=10, threshold=0.3,
                  delimiter=delim, scoring=npmi_scorer)
# -

sent = "I had to replace the timing belt in my mercedes c300".split()
phrased = phrases[sent]
print('|'.join(phrased))

# +
if GENSIM_4_PLUS:
    # gensim 4.x - find_phrases returns a {phrase: score} dict
    # (a dead first DataFrame construction that was immediately overwritten
    # has been removed here)
    phrase_df = pd.DataFrame.from_dict(phrases.find_phrases(sents),
                                       orient='index').reset_index()
    phrase_df.columns = ['phrase', 'score']
    phrase_df = phrase_df[['phrase', 'score']].drop_duplicates() \
        .sort_values(by='score', ascending=False).reset_index(drop=True)
else:
    # gensim 3.x - export_phrases yields (bytes phrase, score) tuples
    phrase_df = pd.DataFrame(phrases.export_phrases(sents, out_delimiter=delim),
                             columns=['phrase', 'score'])
    phrase_df = phrase_df[['phrase', 'score']].drop_duplicates() \
        .sort_values(by='score', ascending=False).reset_index(drop=True)
    phrase_df['phrase'] = phrase_df['phrase'].map(lambda p: p.decode('utf-8'))
# -

phrase_df[phrase_df['phrase'].str.contains('mercedes')].head(3)

# show some additional phrases with score > 0.7
phrase_df.query('score > 0.7').sample(100)

# +
logging.getLogger().setLevel(logging.WARNING)  ###
sents = df['lemmas']  ### like above
phrases = Phrases(sents, min_count=10, threshold=0.7,
                  delimiter=delim, scoring=npmi_scorer)

df['phrased_lemmas'] = df['lemmas'].progress_map(lambda s: phrases[s])
sents = df['phrased_lemmas']
# -

# ## Blueprint: Training Models with Gensim

# +
# for Gensim training
import logging
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
                    level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# -

# +
from gensim.models import Word2Vec

# NOTE(review): size=/iter= are gensim 3.x parameter names; gensim 4.x renamed
# them to vector_size=/epochs=. Confirm the installed version before training.
model = Word2Vec(sents,        # tokenized input sentences
                 size=100,     # size of word vectors (default 100)
                 window=2,     # context window size (default 5)
                 sg=1,         # use skip-gram (default 0 = CBOW)
                 negative=5,   # number of negative samples (default 5)
                 min_count=5,  # ignore infrequent words (default 5)
                 workers=4,    # number of threads (default 3)
                 iter=5)       # number of epochs (default 5)
# -

logging.getLogger().setLevel(logging.ERROR)

model.save('./models/autos_w2v_100_2_full.bin')

model = Word2Vec.load('./models/autos_w2v_100_2_full.bin')

# **This takes several minutes to run.** Please be patient, you need this to continue.

# +
from gensim.models import Word2Vec, FastText

model_path = './models'
model_prefix = 'autos'
# parameter sweep: CBOW/skip-gram x window sizes for Word2Vec, one FastText run
param_grid = {'w2v': {'variant': ['cbow', 'sg'], 'window': [2, 5, 30]},
              'ft': {'variant': ['sg'], 'window': [5]}}
size = 100

for algo, params in param_grid.items():
    print(algo)  ###
    for variant in params['variant']:
        sg = 1 if variant == 'sg' else 0
        for window in params['window']:
            print(f"  Variant: {variant}, Window: {window}, Size: {size}")  ###
            np.random.seed(1)  ### to ensure repeatability
            if algo == 'w2v':
                model = Word2Vec(sents, size=size, window=window, sg=sg)
            else:
                model = FastText(sents, size=size, window=window, sg=sg)
            file_name = f"{model_path}/{model_prefix}_{algo}_{variant}_{window}"
            model.wv.save_word2vec_format(file_name + '.bin', binary=True)
# -

# ## Blueprint: Evaluating Different Models

# +
from gensim.models import KeyedVectors

model_path = './models'  ###
names = ['autos_w2v_cbow_2', 'autos_w2v_sg_2', 'autos_w2v_sg_5',
         'autos_w2v_sg_30', 'autos_ft_sg_5']
models = {}
for name in names:
    file_name = f"{model_path}/{name}.bin"
    print(f"Loading {file_name}")  ###
    models[name] = KeyedVectors.load_word2vec_format(file_name, binary=True)
# -

def compare_models(models, **kwargs):
    """Run most_similar(**kwargs) on every (name, model) pair in `models` and
    return the results side by side, one column per model."""
    df = pd.DataFrame()
    for name, model in models:
        df[name] = [f"{word} {score:.3f}"
                    for word, score in model.most_similar(**kwargs)]
    df.index = df.index + 1  # let row index start at 1
    return df

compare_models([(n, models[n]) for n in names], positive='bmw', topn=10)

# ### Looking for Similar Concepts

# ### Analogy Reasoning on our own Models
#
# **Note** that your results may be slightly different to the ones printed in
# the book because of random initialization.

compare_models([(n, models[n]) for n in names],
               positive=['f150', 'toyota'], negative=['ford'], topn=5).T

# try a different analogy
compare_models([(n, models[n]) for n in names],
               positive=['x3', 'audi'], negative=['bmw'], topn=5).T

# and another one
compare_models([(n, models[n]) for n in names],
               positive=['spark-plug'], negative=[], topn=5)

# # Blueprints for Visualizing Embeddings
#
# ## Blueprint: Applying Dimensionality Reduction

# +
from umap import UMAP

model = models['autos_w2v_sg_30']
words = model.vocab
wv = [model[word] for word in words]

reducer = UMAP(n_components=2, metric='cosine',
               n_neighbors=15, min_dist=0.1, random_state=12)
reduced_wv = reducer.fit_transform(wv)
# -

# +
import plotly.express as px
px.defaults.template = "plotly_white"  ### plotly style

plot_df = pd.DataFrame.from_records(reduced_wv, columns=['x', 'y'])
plot_df['word'] = words
params = {'hover_data': {c: False for c in plot_df.columns},
          'hover_name': 'word'}
params.update({'width': 800, 'height': 600})  ###

fig = px.scatter(plot_df, x="x", y="y", opacity=0.3, size_max=3, **params)
fig.update_traces(marker={'line': {'width': 0}})  ###
fig.update_xaxes(showticklabels=False, showgrid=True, zeroline=False, visible=True)  ###
fig.update_yaxes(showticklabels=False, showgrid=True, zeroline=False, visible=True)  ###
fig.show()
# -

# +
from blueprints.embeddings import plot_embeddings

model = models['autos_w2v_sg_30']  ###
search = ['ford', 'lexus', 'vw', 'hyundai',
          'goodyear', 'spark-plug', 'florida', 'navigation']

_ = plot_embeddings(model, search, topn=50, show_all=True, labels=False,
                    algo='umap', n_neighbors=15, min_dist=0.1, random_state=12)
# -

# +
model = models['autos_w2v_sg_30']  ###
search = ['ford', 'bmw', 'toyota', 'tesla', 'audi', 'mercedes', 'hyundai']

_ = plot_embeddings(model, search, topn=10, show_all=False, labels=True,
                    algo='umap', n_neighbors=15, min_dist=10, spread=25,
                    random_state=7)
# -

_ = plot_embeddings(model, search, topn=30, n_dims=3, algo='umap',
                    n_neighbors=15, min_dist=.1, spread=40, random_state=23)

# +
# PCA plot (not in the book) - better to explain analogies:
# difference vectors of pickup trucks "f150"-"ford", "tacoma"-"toyota" and
# "frontier"-"nissan" are almost parallel.
# "x5"-"bmw" is pointing to a somewhat different direction, but "x5" is not a pickup
model = models['autos_w2v_sg_5']
search = ['ford', 'f150', 'toyota', 'tacoma', 'nissan', 'frontier', 'bmw', 'x5']

_ = plot_embeddings(model, search, topn=0, algo='pca', labels=True, colors=False)
# -

# ## Blueprint: Using Tensorflow Embedding Projector

# +
import csv

model_path = './models'  ###
name = 'autos_w2v_sg_30'
model = models[name]

# one word per line, aligned with the vector file below
with open(f'{model_path}/{name}_words.tsv', 'w', encoding='utf-8') as tsvfile:
    tsvfile.write('\n'.join(model.vocab))

with open(f'{model_path}/{name}_vecs.tsv', 'w', encoding='utf-8') as tsvfile:
    writer = csv.writer(tsvfile, delimiter='\t', dialect=csv.unix_dialect,
                        quoting=csv.QUOTE_MINIMAL)
    for w in model.vocab:
        _ = writer.writerow(model[w].tolist())
# -

# ## Blueprint: Constructing a Similarity Tree

# +
import networkx as nx
from collections import deque

def sim_tree(model, word, top_n, max_dist):
    """Breadth-first expand `word` into a similarity graph: for every visited
    node up to `max_dist` hops away, add its `top_n` most similar words."""
    graph = nx.Graph()
    graph.add_node(word, dist=0)
    to_visit = deque([word])

    while len(to_visit) > 0:
        source = to_visit.popleft()  # visit next node
        dist = graph.nodes[source]['dist'] + 1
        if dist <= max_dist:  # discover new nodes
            for target, sim in model.most_similar(source, topn=top_n):
                if target not in graph:
                    to_visit.append(target)
                    graph.add_node(target, dist=dist)
                    graph.add_edge(source, target, sim=sim, dist=dist)
    return graph
# -

# +
def plt_add_margin(pos, x_factor=0.1, y_factor=0.1):
    # rescales the image s.t. all captions fit onto the canvas
    x_values, y_values = zip(*pos.values())
    x_max = max(x_values)
    x_min = min(x_values)
    y_max = max(y_values)
    y_min = min(y_values)
    x_margin = (x_max - x_min) * x_factor
    y_margin = (y_max - y_min) * y_factor
    # return (x_min - x_margin, x_max + x_margin), (y_min - y_margin, y_max + y_margin)
    plt.xlim(x_min - x_margin, x_max + x_margin)
    plt.ylim(y_min - y_margin, y_max + y_margin)


def scale_weights(graph, minw=1, maxw=8):
    # rescale similarity to interval [minw, maxw] for display
    sims = [graph[s][t]['sim'] for (s, t) in graph.edges]
    min_sim, max_sim = min(sims), max(sims)
    for source, target in graph.edges:
        sim = graph[source][target]['sim']
        graph[source][target]['sim'] = (sim - min_sim) / (max_sim - min_sim) * (maxw - minw) + minw
    return graph


def solve_graphviz_problems(graph):
    # Graphviz has problems with unicode
    # this is to prevent errors during positioning
    def clean(n):
        n = n.replace(',', '')
        n = n.encode().decode('ascii', errors='ignore')
        n = re.sub(r'[{}\[\]]', '-', n)
        n = re.sub(r'^\-', '', n)
        return n
    node_map = {n: clean(n) for n in graph.nodes}
    # remove empty nodes
    for n, m in node_map.items():
        if len(m) == 0:
            graph.remove_node(n)
    return nx.relabel_nodes(graph, node_map)
# -

# +
from networkx.drawing.nx_pydot import graphviz_layout

def plot_tree(graph, node_size=1000, font_size=12):
    """Draw the similarity tree radially (graphviz 'twopi'), nodes colored by
    hop distance, edge widths scaled by similarity."""
    graph = solve_graphviz_problems(graph)  ###
    pos = graphviz_layout(graph, prog='twopi', root=list(graph.nodes)[0])
    plt.figure(figsize=(10, 4), dpi=200)  ###
    plt.grid(b=None)  ### hide grid
    plt.box(False)  ### hide box
    plt_add_margin(pos)  ### just for layout
    colors = [graph.nodes[n]['dist'] for n in graph]  # colorize by distance
    nx.draw_networkx_nodes(graph, pos, node_size=node_size, node_color=colors,
                           cmap='Set1', alpha=0.4)
    nx.draw_networkx_labels(graph, pos, font_size=font_size)
    scale_weights(graph)  ### not in book
    for (n1, n2, sim) in graph.edges(data='sim'):
        nx.draw_networkx_edges(graph, pos, [(n1, n2)], width=sim, alpha=0.2)
    plt.show()
# -

model = models['autos_w2v_sg_2']
graph = sim_tree(model, 'noise', top_n=10, max_dist=3)
plot_tree(graph, node_size=500, font_size=8)

model = models['autos_w2v_sg_30']
graph = sim_tree(model, 'spark-plug', top_n=8, max_dist=2)
plot_tree(graph, node_size=500, font_size=8)

# # Closing Remarks

# # Further Reading

#
ch10/Embeddings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''env'': venv)'
#     language: python
#     name: python3
# ---

# Feature-engine tour: median imputation, categorical frequency encoding,
# Box-Cox transformation, decision-tree discretisation and outlier capping,
# each demonstrated on a small train/test split.

# +
import numpy as np
import pandas as pd

from feature_engine import discretisation as dsc
from feature_engine import encoding as ce
from feature_engine import outliers as outr
from feature_engine import transformation as vt
from feature_engine.imputation import MeanMedianImputer
from sklearn.model_selection import train_test_split
# -

# +
def load_titanic():
    """Download the raw titanic data from OpenML and do minimal cleaning:
    '?' -> NaN, cabin reduced to its first (deck) letter, pclass treated as
    categorical, missing embarked filled with 'C'."""
    data = pd.read_csv("https://www.openml.org/data/get_csv/16826755/phpMYEkMl")
    data = data.replace("?", np.nan)
    data["cabin"] = data["cabin"].astype(str).str[0]
    data["pclass"] = data["pclass"].astype("O")
    data["embarked"].fillna("C", inplace=True)
    return data


# Load dataset
def load_titanic2():
    """Like load_titanic(), but additionally casts fare/age to float and
    median-imputes them (needed by transformers that reject NaNs).

    The original duplicated load_titanic()'s body verbatim; delegating keeps
    the two loaders in sync.
    """
    data = load_titanic()
    data["fare"] = data["fare"].astype("float")
    data["fare"].fillna(data["fare"].median(), inplace=True)
    data["age"] = data["age"].astype("float")
    data["age"].fillna(data["age"].median(), inplace=True)
    return data
# -

# +
# Load dataset
data = pd.read_csv("creditApprovalUCI.csv")
data.head()

# +
# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop("A16", axis=1), data["A16"], test_size=0.3, random_state=0)

# numerical variables with missing data
# NOTE(review): .isnull().median() yields only 0/1 per column; the fraction of
# missing values would be .isnull().mean() - confirm the intended statistic.
X_train[['A8', 'A11']].isnull().median()

# +
# Set up the imputer
median_imputer = MeanMedianImputer(
    imputation_method="median",
    variables=["A2", "A3", "A8", "A11", "A15"])

# fit the imputer
median_imputer.fit(X_train)
median_imputer.imputer_dict_

# +
# Load dataset again (a fresh copy, untouched by the exploration above)
data = pd.read_csv("creditApprovalUCI.csv")

# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop("A16", axis=1), data["A16"], test_size=0.3, random_state=0)

# Set up the imputer
median_imputer = MeanMedianImputer(
    imputation_method="median",
    variables=["A2", "A3", "A8", "A11", "A15"])

# fit the imputer
median_imputer.fit(X_train)

# transform the data
X_train = median_imputer.transform(X_train)
X_test = median_imputer.transform(X_test)
# -

X_train.head()

# +
from feature_engine import encoding as ce

# Load dataset
data = load_titanic()

# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(["survived", "name", "ticket"], axis=1),
    data["survived"], test_size=0.3, random_state=0,)

# set up the encoder
encoder = ce.CountFrequencyEncoder(
    encoding_method="frequency", variables=["cabin", "pclass", "embarked"])

# fit the encoder
encoder.fit(X_train)

# transform the data
train_t = encoder.transform(X_train)
test_t = encoder.transform(X_test)

# +
from feature_engine import transformation as vt

# Load dataset
data = pd.read_csv("houseprice.csv")

# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(["Id", "SalePrice"], axis=1),
    data["SalePrice"], test_size=0.3, random_state=0,)

# set up the variable transformer
tf = vt.BoxCoxTransformer(variables=["LotArea", "GrLivArea"])

# fit the transformer
tf.fit(X_train)

# transform the data
train_t = tf.transform(X_train)
test_t = tf.transform(X_test)

# +
from feature_engine import discretisation as dsc

# Load dataset (the original had a redundant `data = data = ...` double assignment)
data = pd.read_csv("houseprice.csv")

# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(["Id", "SalePrice"], axis=1),
    data["SalePrice"], test_size=0.3, random_state=0,)

# set up the discretisation transformer
disc = dsc.DecisionTreeDiscretiser(
    cv=3, scoring="neg_mean_squared_error",
    variables=["LotArea", "GrLivArea"], regression=True,)

# fit the transformer
disc.fit(X_train, y_train)

# transform the data
train_t = disc.transform(X_train)
test_t = disc.transform(X_test)

# +
from feature_engine import outliers as outr

# Load dataset
data = load_titanic2()

# Separate into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(["survived", "name", "ticket"], axis=1),
    data["survived"], test_size=0.3, random_state=0,)

# set up the capper
capper = outr.Winsorizer(
    capping_method="gaussian", tail="right", fold=3, variables=["age", "fare"])

# fit the capper
capper.fit(X_train)

# transform the data
train_t = capper.transform(X_train)
test_t = capper.transform(X_test)
# -
8__Feature_Engineering_Automation/feature.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: myenv
#     language: python
#     name: myenv
# ---

# # Hcad data of Commercial Real Estate Building
#
# Unpacks HCAD export archives, loads the headerless `land` and `real_acct`
# tab-separated files, filters to office buildings (site codes 4353/4354) in
# Houston, and appends new rows to the `building_account` MySQL table.

# Imports
import time
import sys
from zipfile import ZipFile
import pandas as pd
import pandas.io.sql as pdsql
import glob
import os
import numpy as np

# Datetime for new column
import datetime

# Imports for mySQL
from sqlalchemy import create_engine, event, DateTime
from db_setup import mysql_user, mysql_password, db_name
import mysql.connector

mydir = os.path.abspath('./HotelOccupancyTaxData')
mydir

# ## Adding Headers to Txt files from hcad.org
# (the raw exports ship without a header row, so column names are hard-coded)

# +
land = ['ACCOUNT', 'LINE_NUMBER', 'LAND_USE_CODE', 'LAND_USE_DSCR', 'SITE_CD',
        'SITE_CD_DSCR', 'SITE_ADJ', 'UNIT_TYPE', 'UNITS', 'SIZE_FACTOR',
        'SITE_FACT', 'APPR_OVERRIDE_FACT', 'APPR_OVERRIDE_REAS', 'TOT_ADJ',
        'UNIT_PRICE', 'ADJ_UNIT_PRICE', 'VALUE', 'OVERRIDE_VALUE']

real_account = ['ACCOUNT', 'TAX_YEAR', 'MAILTO', 'MAIL_ADDR_1', 'MAIL_ADDR_2',
                'MAIL_CITY', 'MAIL_STATE', 'MAIL_ZIP', 'MAIL_COUNTRY',
                'UNDELIVERABLE', 'STR_PFX', 'STR_NUM', 'STR_NUM_SFX',
                'STR_NAME', 'STR_SFX', 'STR_SFX_DIR', 'STR_UNIT',
                'SITE_ADDR_1', 'SITE_ADDR_2', 'SITE_ADDR_3', 'STATE_CLASS',
                'SCHOOL_DIST', 'MAP_FACET', 'KEY_MAP', 'NEIGHBORHOOD_CODE',
                'NEIGHBORHOOD_GROUP', 'MARKET_AREA_1', 'MARKET_AREA_1_DSCR',
                'MARKET_AREA_2', 'MARKET_AREA_2_DSCR', 'ECON_AREA',
                'ECON_BLD_CLASS', 'CENTER_CODE', 'YR_IMPR', 'YR_ANNEXED',
                'SPLT_DT', 'DSC_CD', 'NXT_BUILDING', 'TOTAL_BUILDING_ARE',
                'TOTAL_LAND_AREA', 'ACREAGE', 'CAP_ACCOUNT', 'SHARED_CAD_CODE',
                'LAND_VALUE', 'IMPROVEMENT_VALUE', 'EXTRA_FEATURES_VAL',
                'AG_VALUE', 'ASSESSED_VALUE', 'TOTAL_APPRAISED_VA',
                'TOTAL_MARKET_VALUE', 'PRIOR_LND_VALUE', 'PRIOR_IMPR_VALUE',
                'PRIOR_X_FEATURES_V', 'PRIOR_AG_VALUE', 'PRIOR_TOTAL_APPRAI',
                'PRIOR_TOTAL_MARKET', 'NEW_CONSTRUCTION_V', 'TOTAL_RCN_VALUE',
                'VALUE_STATUS', 'NOTICED', 'NOTICE_DATE', 'PROTESTED',
                'CERTIFIED_DATE', 'LAST_INSPECTED_DAT', 'LAST_INSPECTED_BY',
                'NEW_OWNER_DATE', 'LEGAL_DSCR_1', 'LEGAL_DSCR_2',
                'LEGAL_DSCR_3', 'LEGAL_DSCR_4', 'JURS']
# -

# #### Unzipping files within specified directory
# extract all files
# (enumerate + context manager: the original shadowed the builtin `zip` and
# closed the archive handle manually)
for i, file in enumerate(glob.glob(mydir + '/*.zip'), start=1):
    with ZipFile(file, 'r') as archive:
        print(f'Extracting file {i}')
        archive.extractall(mydir)
    print('Done!')
    print(f"File {i}, extracted: {file}\n")
    time.sleep(1)
    # os.remove(file)

# ## Specified Text file to Dataframe
#
# BUG FIX: the original re-assigned df_land/df_real_acct on every matching file
# and then ran `df_land = df_land.append(df_land)`, which duplicated every row
# of the *current* file and discarded previously read files. Accumulate the
# pieces and concatenate once instead.
land_frames = []
real_acct_frames = []
for file in glob.glob(mydir + '/*.txt'):
    if "land" in file:
        frame = pd.read_table(file, sep='\t', quotechar='"',
                              error_bad_lines=False,
                              header=None, names=land,
                              index_col=False, engine='python')
        land_frames.append(frame)
        # os.remove(file)
        print('Added the ' + file + ' into land text file')
        print('deleted the file ' + str(file))
    elif "real_acct" in file:
        frame = pd.read_table(file, sep='\t', quotechar='"',
                              error_bad_lines=False,
                              header=None, names=real_account,
                              index_col=False, engine='python')
        real_acct_frames.append(frame)
        # os.remove(file)
        print('Added the ' + file + ' into real_acct text file')
        print('deleted the file ' + str(file))
    else:
        # os.remove(file)
        print('File not being used: ' + str(file))

df_land = pd.concat(land_frames, ignore_index=True)
df_real_acct = pd.concat(real_acct_frames, ignore_index=True)

# ## Text file Land from Real_Building_land.zip
# * land use, acreage, and land units.
# * file contains the id's with the site cd of:
#   * 4353 -- Office Bldgs. Low-Rise (1 to 4 Stories)
#   * 4354
df_land.head()

# #### Converting columns to int
# SITE_ADJ, UNITS, SIZE_FACTOR, SITE_FACT, TOT_ADJ: fill missing with 0, then
# cast to int64 (vectorized over all five columns at once)
int_cols = ['SITE_ADJ', 'UNITS', 'SIZE_FACTOR', 'SITE_FACT', 'TOT_ADJ']
df_land[int_cols] = df_land[int_cols].fillna(0).astype(np.int64)

# #### Count columns
df_land.count()

# #### Filter data to specified building code for 4353 and 4354
df_land = df_land[(df_land['SITE_CD'] == '4353') | (df_land['SITE_CD'] == '4354')]
df_land.head()

# #### Counting filtered data
df_land.count()

# #### Saving filtered land data
path = os.path.abspath('HotelOccupancyTaxData/landData.csv')
df_land.to_csv(path, index=False)

# NOTE(review): df_other_biz is referenced here but never defined anywhere in
# this notebook, so this cell raises NameError as written. It presumably should
# be loaded from another HCAD export - confirm and add the missing load step.
df_other_biz = df_other_biz[df_other_biz['ACCOUNT'].astype(int).isin(df_land['ACCOUNT'].astype(int))]
df_other_biz.count()

path = os.path.abspath('HotelOccupancyTaxData/filteredOtherBuildingData.csv')
df_other_biz.to_csv(path, index=False)

# ## Real Account from Real Property Real_acct_owner.zip
df_real_acct.head(2)

# #### Count of Real Acct DF
df_real_acct.count()

# ## Filtering Account Data using the Accounts from the Land DF that have
# ## identified commercial real estate buildings
df_real_acct = df_real_acct[df_real_acct['ACCOUNT'].astype(int).isin(df_land['ACCOUNT'].astype(int))]
df_real_acct.count()

# #### Saving the raw data of commercial building data
path = os.path.abspath('HotelOccupancyTaxData/RawCommercialBuildings.csv')
df_real_acct.to_csv(path, index=False)

# #### Building Engine
# (the sqlalchemy/db_setup/datetime imports the original repeated here are
# already done at the top of the file)
connection_string = f"{mysql_user}:{mysql_password}@localhost:3306/{db_name}?charset=utf8"
engine = create_engine(f'mysql://{connection_string}')

# #### Checking connection by viewing the tables
engine.table_names()

# #### Selecting the specified table building_account to drop the data to
bld_acct_db = pdsql.read_sql("SELECT * FROM building_account", engine)

# #### Filtering data that is within the database, if any
try:
    df_real_acct = df_real_acct[~df_real_acct['ACCOUNT'].astype(int).isin(bld_acct_db['ACCOUNT'].astype(int))]
    if df_real_acct.size != 0:
        print(f"There are {len(bld_acct_db)} data attributes in building_account table from the database\n{len(df_real_acct)} new companies, based on tax payer number from filtered data df_tact")
        df_real_acct.to_sql(name='building_account', con=engine,
                            if_exists='append', index=False, chunksize=1000)
        print(f"building_account to database append, completed")
        # context manager so the log file is always closed; the original
        # f'\building_account...' literal wrote an accidental backspace
        # control character ('\b') into the log
        with open('HotelOccupancyTaxData/formattedData/DBUploadRecord.txt', 'a+') as f:
            f.write(f'building_account.csv, {len(df_real_acct)}, building_account table, {len(bld_acct_db)}\n')
    else:
        print("No new data")
        with open('HotelOccupancyTaxData/formattedData/DBUploadRecord.txt', 'a+') as f:
            f.write(f'building_account.csv, {len(df_real_acct)}, building_account table, {len(bld_acct_db)}\n')
            f.write('-------------------------------------------------------------\n')
except Exception as e:
    print(f"Something went wrong, df_ftact was not able to append to database or no new data: {e}")

# ## Cleaning up Dataframe of Building Accounts and only using needed columns
df_real_acct = df_real_acct[['ACCOUNT', 'TAX_YEAR', 'SITE_ADDR_1',
                             'SITE_ADDR_2', 'SITE_ADDR_3',
                             'MARKET_AREA_1_DSCR', 'ECON_BLD_CLASS',
                             'YR_IMPR', 'TOTAL_BUILDING_ARE']]

# #### Filtering data with no duplicates
df_real_acct = df_real_acct.drop_duplicates(subset='ACCOUNT', keep='first')
df_real_acct.count()

# #### Removing rows with empty address/zipcode columns
# BUG FIX: the original called dropna without assigning the result, so nothing
# was actually dropped.
df_real_acct = df_real_acct.dropna(subset=['SITE_ADDR_1', 'SITE_ADDR_3'])
df_real_acct.head()

# #### Check data that has been dropped
df_real_acct.count()

# #### Filter to only Houston location
df_real_acct = df_real_acct[df_real_acct['SITE_ADDR_2'] == 'HOUSTON']
df_real_acct.head()

# #### Save to csv file
path = os.path.abspath('HotelOccupancyTaxData/CommercialBuildings.csv')
df_real_acct.to_csv(path, index=False)
CRE_Marketing_Data/BuildingData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pilot study: relationship between peak tangential pedal force angle and power/cadence using Wattbike data # ## Initialization # + from datetime import date import pandas as pd from sklearn import linear_model from wblib.models import WattbikeDataFrame # %matplotlib inline # - user_id = 'u-1756bbba7e2a350' wdf = WattbikeDataFrame() wdf = wdf.load_for_user(user_id, after=date(2017, 1, 1), before=date(2017, 12, 1)) wdf = wdf.assign( mean_peak_angle=wdf[['right_max_angle', 'left_max_angle']].mean(axis=1) ) wdf_without_polar_plots = wdf[['session_id', 'power', 'cadence', 'mean_peak_angle']].dropna() wdf_without_polar_plots.to_csv('./data.csv') wdf_without_polar_plots = WattbikeDataFrame(pd.read_csv('./data.csv')) print('Memory usage of all data: {}MB'.format(int(round(wdf.memory_usage().sum()/1000000)))) print('Number of sessions: {}'.format(len(set(wdf.session_id)))) print('Number of pedal revolutions: {}'.format(len(wdf_without_polar_plots))) # ## Introduction # The pedaling motion is characterized by a circular movement where the goal is to deliver as much work as possible by maximizing the tangential (i.e. perpendicular to the crank arms) force on the pedals. It is known that the force curve describing this tangential force is not constant throughout a pedal revolution but instead contains dead spots where the crank arms are vertical (see figure 1; most likely because it is difficult for cyclists to apply the horizontal tangential force at those locations). The force curve also contains a maximum during the downward motion of the cranks at which the force is the highest. The angle at which this maximum force occurs is the peak tangential pedal force angle. 
This article will focus on this angle to gain an understanding on the relationship between power and cadence and this angle. # The tangential forces during 1 revolution can be plotted in 2 ways: With a traditional Cartesian coordinate system (figure 1) or with polar coordinate system (figure 2). The latter gives a nice visual representation of the pedal revolution that makes it slightly easier to compare the timing of the forces during the downward motion of the left and right leg. # # The angles in the Cartesian plot correspond to: # - 0° left crank pointing upward, right crank pointing downward # - 90° left crank horizontal forward, right crank backward # - 180° right crank pointing upward, left crank pointing downward # - 270° right crank horizontal forward, left crank backward # - 360° identical to first 0°; left crank vertical pointing upward # # In the polar plot the angles are ‘reset’ when a crank is vertical to be able to compare angles during the downward phases of each crank (i.e. 
leg), therefore the angles in the polar plot correspond to: # - Top 0° left crank pointing upward, right crank pointing downward # - Left 90° left crank horizontal forward, right crank backward # - Bottom 0°right crank pointing upward, left crank pointing downward # - Right 90° right crank horizontal forward, left crank backward # - Top 0° identical to first 0°; left crank vertical pointing upward from wblib.tools import polar_force_column_labels from matplotlib.pyplot import text single_revolution = wdf.iloc[99999] forces = pd.Series([single_revolution[angle] for angle in polar_force_column_labels()]) fig_forces_cartesian = forces.plot() fig_forces_cartesian.set_xlabel('Angle (°)') fig_forces_cartesian.set_ylabel('Tangential pedal force (-)'); fig_forces_cartesian.set_xticks(range(0, 361, 60)); fig_forces_polar = WattbikeDataFrame([wdf.iloc[1337]]).plot.polar() # ## Methods and data # The Wattbike indoor bike measures a resultant of the tangential pedal force by measuring the tension on the chain. The chain tension is measured with a constant frequency of 100Hz which results in a different number of datapoints for each pedal revolution depending on the cadence. The excellent (yeah... I wrote it myself) library [wblib](https://github.com/aartgoossens/wblib) interpolates and normalizes these datapoints to 360 datapoints per revolution (1 datapoint per degree) so analyzing the data is quite straightforward. # All Wattbike workouts for 1 athlete (me, the author) between January 1st 2017 and December 1st 2017 are downloaded via a custom script from the Wattbike Hub servers. The total number of workouts is 33, containing a total of 164564 pedal revolutions. The entire dataset 'only' consumes about 520MB of memory, but since writing it to a csv-file on disk would take up about 4.5GB only a subset without the raw polar plot data is available in this repository ('peak_angle/data.csv'). 
# ## Results

fig_average = wdf.plot.polar()

wdf.mean_peak_angle.mean()

# The average polar plot for all sessions is shown above in figure 1. One can think of this polar plot as the 'signature' polar plot for this cyclist. The average peak angle is 117 degrees.

for session_id in set(wdf.session_id):
    fig_sessions = wdf.loc[wdf.session_id == session_id].plot.polar(linewidth=2.0)

# If the average polar plot for each workout is plotted separately like in figure 2 above, you can see that although the main characteristics of the plots (shape, peak angle, 'height' of dead spot) are similar, there is some variation between workouts. The brown-ish and blue polar plots are most distinguishable from the rest and happen to correspond to the two shortest workouts (containing only 21 and 215 revolutions each) where the cyclist probably only recorded part of a warming-up.

min_power = 0
max_power = 100
while max_power <= 600:
    fig_power = wdf.loc[wdf.power.between(min_power, max_power)].plot.polar(linewidth=2)
    min_power += 100
    max_power += 100
fig_power.legend(['{}-{}W'.format(i, i+100) for i in range(0, 600, 100)], labelspacing=0);

# When we look at the polar plot for different power ranges a similar result is shown (figure 3). Although there is some variation between power ranges, the overall characteristics are very similar, with the exception of the range 0-100W. At first sight there does not seem to be an obvious relationship between power and peak angle.

min_cadence = 50
max_cadence = 60
while max_cadence <= 120:
    fig_cadence = wdf.loc[wdf.cadence.between(min_cadence, max_cadence)].plot.polar(linewidth=2)
    min_cadence += 10
    max_cadence += 10
fig_cadence.legend(['{}-{}rpm'.format(i, i+10) for i in range(50, 120, 10)], labelspacing=0);

# When the same plot is made for cadence (figure 4) it looks like there is more variation and also a positive relationship between peak angle and cadence: higher cadence appears to correlate to higher peak angles.
The overall shape of the polar plots stays very similar between cadence ranges although the dead spots are less 'dead' at higher cadences. One thing to note here is that the polar plots for higher cadences are smaller. This is most probably a result of the fact that the forces of which each polar plot consists are normalized to their mean; therefore a 'smooth' force curve will result in a smaller polar plot than for a force curve where the peak force is more pronounced.

# To calculate the model describing these data a linear mixed model is used. This type of model can be used to calculate linear regression in the form of y=a*x + b (but also for multiple independent variables) where the samples are grouped into subsets. The model parameters are shown in the table below.

# +
import statsmodels.api as sm

X = wdf_without_polar_plots[['power', 'cadence']]
X = sm.add_constant(X)
y = wdf_without_polar_plots['mean_peak_angle']
groups = wdf_without_polar_plots['session_id']

model = sm.MixedLM(y, X, groups).fit()
predictions = model.predict(X)
model.summary()
# -

print('mean_peak_angle = power*{power_coef} + cadence*{cadence_coef} + {constant}'.format(
    power_coef=model.params.power.round(3),
    cadence_coef=model.params.cadence.round(3),
    constant=int(model.params.const.round(0))
))

# The resulting formula for the model is:
#
# mean_peak_angle = power\*-0.025 + cadence\*0.63 + 68.295
#
# The results of the analysis make it clear that both power and cadence have a significant relationship with the peak force angle: For both independent variables the p value is <0.05 (even <0.001). With a large number of samples like this (n=164564) a significant relationship between variables is highly likely so in this case it is more interesting to look at effect sizes, the coefficients. For the independent variable power this coefficient is quite small: For every 1 Watt increase in power the peak force angle decreases by 0.025 degree.
For cadence this coefficient is larger: For every 1 rpm increase in cadence the peak force angle increases by 0.63 degree. This data suggests that although power has a significant relationship with peak force angle, a larger amount of the variation in peak force angle is explained by changes in cadence.

# ## Conclusion

# Although it might sometimes appear as if high power results in high peak angles, the opposite is true: The model parameters show that high peak angles correspond to high cadence and that power even has an inverse relationship with peak angle. There are however a few points to keep in mind when interpreting these results:
# - Although the coefficient for power in the model is smaller than the coefficient for cadence, during a typical cycling workout the variation in power can be much larger than the variation in cadence: Whereas the power can change from e.g. 100W to 500W (resulting in a decrease in peak angle of 10°), the cadence can only change from e.g. 70rpm to 110rpm (resulting in an increase in peak angle of 25°). If you look at the data like this the effect of power is still smaller than the effect of cadence but not as small as you would expect when only looking at the coefficients of the model.
# - A confounding factor in the data is that it contains pedal revolutions in both sitting and standing positions. Standing positions seem to increase the peak force angle by a lot (I will publish an article on this topic later) which might add noise to the data. On the other hand, the percentage of standing pedal revolutions is estimated at <0-5% so the effect could be small.
# - The dataset contains only data from 1 cyclist. The relationship between power, cadence and peak angles might exist for this cyclist but not for another. I am working on gathering more data to do the same analysis for more cyclists. Get in touch with me if you are willing to donate your data.
:) # ## Exporting images import numpy as np figures = [fig_forces_cartesian, fig_forces_polar, fig_average, fig_sessions, fig_power, fig_cadence] figure_names = ['fig_forces_cartesian', 'fig_forces_polar', 'fig_average', 'fig_sessions', 'fig_power', 'fig_cadence'] for fig, name in zip(figures, figure_names): if hasattr(fig, 'savefig'): f = fig elif isinstance(fig, np.ndarray): f = fig[0].get_figure() else: f = fig.get_figure() f.savefig(f'{name}.png', dpi=600)
peak_angles/article.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def validMountainArray(A):
    """Return True if A is a valid mountain array (LeetCode 941).

    A valid mountain strictly increases to a single peak and then strictly
    decreases; plateaus disqualify it, and the peak may not be the first or
    last element. Runs in O(n) time, O(1) extra space.

    Bug fix: the original flag-based loop accepted monotonically increasing
    input such as [1, 2, 3] because it never verified that a descent
    actually occurred; the two-pointer walk below does.
    """
    n = len(A)
    if n < 3:
        return False
    i = 0
    # Walk up the strictly increasing slope.
    while i + 1 < n and A[i] < A[i + 1]:
        i += 1
    # The peak must be neither the first nor the last element.
    if i == 0 or i == n - 1:
        return False
    # Walk down the strictly decreasing slope.
    while i + 1 < n and A[i] > A[i + 1]:
        i += 1
    # Valid only if the descent reached the end of the array.
    return i == n - 1


print(validMountainArray([0, 2, 3, 3, 5, 2, 1, 0]))
# -
Anjani/Leetcode/Array/Valid Mountain Array.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pyspark.sql import SparkSession
from pyspark import SparkFiles
import pandas as pd

# +
# # !pip install pyspark
# -

def extract(endpoint, file_name, show=True):
    """Download a CSV from AWS S3 via Spark and return it as a pandas DataFrame.

    Parameters
    ----------
    endpoint : str
        Full URL of the CSV file on AWS S3.
    file_name : str
        File name used to look the download up through SparkFiles.
    show : bool, optional
        When True (default), display the first rows as a sanity check.
    """
    # Start spark session to talk to AWS.
    spark = SparkSession.builder.appName("project").config("spark.driver.extraClassPath","/content/postgresql-42.2.9.jar").getOrCreate()
    # Extract file from AWS.
    url = endpoint
    spark.sparkContext.addFile(url)
    df = spark.read.csv(SparkFiles.get(file_name), sep=",", header=True, inferSchema=True)
    # Convert from spark dataframe to pandas for ease of use.
    df = df.toPandas()
    if show:  # idiomatic truthiness check instead of `== True`
        display(df.head())
    return df


endpoint = "https://burdenderek-project.s3.us-east-2.amazonaws.com/resources/student-mat.csv"
file_name = "student-mat.csv"
math = extract(endpoint, file_name)

endpoint = "https://burdenderek-project.s3.us-east-2.amazonaws.com/resources/student-por.csv"
file_name = "student-por.csv"
por = extract(endpoint, file_name)


def transform(data, show=True):
    """Bucket the grade columns G1/G2/G3 into pass (1) / fail (0) in place.

    A grade of 10 or above is a pass, 9 and below is a fail. Mutates *data*
    and returns it so calls can be chained.
    """
    # Failing grades first: anything below 10 becomes 0 ...
    data.loc[(data["G1"] < 10), "G1"] = 0
    data.loc[(data["G2"] < 10), "G2"] = 0
    data.loc[(data["G3"] < 10), "G3"] = 0
    # ... then everything still at or above 10 becomes 1. Order matters:
    # the zeros written above are below 10, so this step cannot touch them.
    data.loc[(data["G1"] >= 10), "G1"] = 1
    data.loc[(data["G2"] >= 10), "G2"] = 1
    data.loc[(data["G3"] >= 10), "G3"] = 1
    if show:
        display(data.head())
    return data  # bug fix: the original bare `return` made transform(df) evaluate to None


transform(math)
transform(por)


def pandas_to_pyspark_df(df, show=True):
    """Convert a pandas DataFrame back into a Spark DataFrame."""
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("pandas to Spark").getOrCreate()
    df = spark.createDataFrame(df)
    if show:
        df.show(5)
    return df


math = pandas_to_pyspark_df(math)
por = pandas_to_pyspark_df(por)

por.show(5)
ETL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import warnings  # turn off warning
import os.path as osp
import os
import numpy as np
from sklearn.cluster import KMeans
from skimage.io import imread
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
# %matplotlib inline

# Fixed RGB palette used to colour the predicted cluster labels.
colors = [[0,0,0], [255,255,255], [255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255],[255,0,255]]
image_files = ["DSC05376.jpeg","DSC05384.jpeg","DSC05386.jpeg"]

# +
# Load every readable image; skip the ones that cannot be read.
images = []
for image_file in image_files:
    try:
        images.append(imread(image_file))
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the best-effort loading behaviour
        # without masking interpreter-level signals.
        continue

fig, ax = plt.subplots(ncols=len(images), nrows=1, figsize=(12, 12))
for i, image in enumerate(images):
    ax[i].imshow(image, aspect='equal')
    ax[i].set_axis_off()
plt.tight_layout()
plt.show()
# -

# # Selecting the optimal number of clusters
# We need to find the optimal number of clusters to get the best separation of kernel and background pixels
# 1. Train a KMeans clustering model on K from 2 to 5
# 2. plot the images and select the best separation for the lowest number of clusters

# +
MIN_K, MAX_K = 2, 5
# Flatten each image into a (num_pixels, num_channels) feature matrix.
features = [image.reshape((image.shape[0] * image.shape[1], image.shape[2])) for image in images]
data = {}
for i, X in enumerate(features):
    print("Training models on i = {} image".format(i))
    data[i] = {'models': [], 'labels': []}
    for k in range(MIN_K, MAX_K + 1):
        print("Traing on K = " + str(k))
        model = KMeans(n_clusters=k)
        # Reshape the flat per-pixel labels back onto the image's 2-D grid.
        labels = model.fit_predict(X).reshape(images[i].shape[:2])
        data[i]['models'].append(model)
        data[i]['labels'].append(labels)

# +
# Aspect ratio of the comparison grid. Bug fix: the original expression read
# `float(w) * MAX_K - MIN_K + 1`, i.e. it subtracted a cluster count from a
# pixel-width product because of a missing pair of parentheses; the
# column-count term is now grouped as intended.
sample_aspect = (images[0].shape[0] * 3) / (float(images[0].shape[1]) * (MAX_K - MIN_K + 1))

fig, ax = plt.subplots(nrows=len(data), ncols=MAX_K - MIN_K + 2, figsize=(15, int(15 * sample_aspect)))
# First column shows the originals.
for i, x in enumerate(ax):
    x[0].imshow(images[i], aspect='auto')
    x[0].set_axis_off()
ax[0][0].set_title("Original")
for iax in range(1, MAX_K - MIN_K + 2):
    ax[0][iax].set_title("K = " + str(iax + MIN_K - 1))
# Remaining columns show the per-K segmentations, coloured via `colors`.
for i, x in enumerate(ax):
    for j, k in enumerate(range(MIN_K, MAX_K + 1)):
        pred_image = np.zeros(images[i].shape, dtype=int)
        labels = data[i]['labels'][j]
        for l in range(k):
            pred_image[labels == l] = colors[l]
        x[j+1].imshow(pred_image, aspect='auto')
        x[j+1].set_axis_off()
plt.tight_layout()
plt.show()

# +
# Elbow plot: inertia per number of clusters, one axis per image.
fig, ax = plt.subplots(len(data), 1, figsize=(5, 10), frameon=True, sharex=True)
x = list(range(MIN_K, MAX_K + 1))
for i, dat in data.items():
    inertia = [model.inertia_ for model in dat['models']]
    ax[i].plot(x, inertia)  # removed a stray no-op `ax[i]` statement here
ax[-1].set_xlabel("Number of Means")
ax[len(ax) // 2].set_ylabel("Inertia")
plt.xticks(np.arange(2, 6))
plt.tight_layout()
plt.show()
# -

# # Histogram Rating
test_scripts/SegmentationByClustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # [View in Colaboratory](https://colab.research.google.com/github/DillipKS/ZS_2018_challenge/blob/master/ZS_challenge_code.ipynb) # + [markdown] id="xsKDKG0gcXUN" colab_type="text" # #ZS 2018 challenge conducted by Hackerrank. # # Link: https://www.hackerearth.com/challenge/competitive/zs-data-science-challenge-2018/ # # Author: <NAME> # # This python notebook will introduce you to data visualisation and data manipulation. You will train a Linear Regressor with Adagrad optimizer and L2 regularization. # # **Happy coding!** # + id="_NQULtpzrMJM" colab_type="code" colab={} from __future__ import print_function import math from IPython import display from matplotlib import cm from matplotlib import gridspec from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn import metrics import tensorflow as tf from tensorflow.python.data import Dataset tf.logging.set_verbosity(tf.logging.ERROR) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format # + id="h69jACJscn7M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="eaa2dc79-5b98-4392-85fc-15a578f7788f" # !pip install -U -q PyDrive # !pip install -U -q xlrd # !pip install --upgrade pip from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials import pandas as pd # + [markdown] id="ZzwLf8-nLuWX" colab_type="text" # Import the datasets in your Google Drive from Github and make all of them shareable. In the shared link for each file, there will be a unique ID which you need to copy and paste here as value to the 'id' key. 
# + id="TEncxiv_mo-2" colab_type="code" colab={} # Code to read csv file from Google Drive into colaboratory: #1. Authenticate and create the PyDrive client. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) #2. Get the file downloaded = drive.CreateFile({'id':'1HkQQm9ys0UiNRG1X2J66HZGcFFi75tna'}) # replace the id with id of file you want to access downloaded.GetContentFile('promotional_expenses.csv') #3. Read file as panda dataframe expenses_df = pd.read_csv('promotional_expenses.csv') downloaded = drive.CreateFile({'id':'1Z_XB6-ruOICXEYs_1aEWY9h2WZNVT_JW'}) downloaded.GetContentFile('yds_train2018.csv') yds_train_df = pd.read_csv('yds_train2018.csv') downloaded = drive.CreateFile({'id':'17GNCPd-9TEruKVNUQTOkKraMOZRy3KgN'}) downloaded.GetContentFile('yds_test2018.csv') yds_test_df = pd.read_csv('yds_test2018.csv') downloaded = drive.CreateFile({'id':'1JMfYCyWxZVm71U5amCrOmL0HxC-BlV4w'}) downloaded.GetContentFile('holidays.xlsx') holidays_df = pd.read_excel('holidays.xlsx') # + [markdown] id="ZA5p1S8PZvlF" colab_type="text" # ** Data visualisation** # # # Visualise the data as tables and histograms to understand the problem and data further. # # + id="YAe9Lv73c5FE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 720} outputId="a9148d4d-68db-49ee-e164-a047a63ecd09" #1. Expenses data print(expenses_df.describe()) print(expenses_df.hist()) print(expenses_df.head()) # + id="zEoIZCz1dhb2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 130} outputId="ef90ce1a-87ec-448c-f693-a26d2684814a" #2. Holidays date data print(holidays_df.head()) # + id="N1pbRxhSdwGb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 758} outputId="8d70182e-bd56-4ab0-960d-42e42da67228" #3. 
Training data print(yds_train_df.describe()) print(yds_train_df.hist()) print(yds_train_df.head()) # + [markdown] id="a55kICZ7aTSW" colab_type="text" # Notice Sales value can be Negative. # + id="Smaqr_8deHS8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 423} outputId="e0c3a8be-f539-4d4a-b061-1245df465ff5" yds_train_df.loc[(yds_train_df['Sales'] < 0)] # + id="Yazu63X9eRi5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 909} outputId="fcb898a7-b1eb-4e96-a9d9-78948fc5e5bd" #4. Test data # Value in Sales column needs to be predicted print(yds_test_df.describe()) print(yds_test_df) print(yds_test_df.hist()) # + [markdown] id="YJsDAWiZe6pa" colab_type="text" # Merchant_ID and Week columns are not present in Test data. At first glance, they seem irrelevant for Model training. Hence, group Training data rows summing the Sales value for common Merchant_ID and Week values. # + id="EoWKWFQ4e6Mr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 733} outputId="e440a341-4c4c-4285-fe59-fe4457e3d1cd" grouped_df = yds_train_df[["Year","Month","Product_ID","Country","Sales"]] grouped_df = grouped_df.groupby(["Year","Month","Country","Product_ID"]).sum().apply(list) grouped_df = grouped_df.reset_index() # flattening the grouped dataframe print(grouped_df.head()) print(grouped_df.describe()) print(grouped_df.hist()) # + [markdown] id="8C6NJDHkf1Ym" colab_type="text" # Notice that Sales value in the grouped Training data is always positive for all rows. Also, the total number of rows now are 388, earlier there were 79072 rows. # # Expenses data is given against 4 features, namely, Year, Month, Country and Product_ID, which are also the ones in grouped Training and Test data. Hence, merge Expenses data with grouped Training and Test data. 
# + id="Vt31g4hhem8n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 909} outputId="2ea6ae7b-2ec0-4f6d-dc7b-d0e52fb292e5"
# Right-merge keeps every row of the grouped training data, attaching
# expense information where it is available.
merge_train_df = pd.merge(expenses_df, grouped_df, on = ['Year','Month','Product_ID','Country'], how='right')
print(merge_train_df.describe())
print(merge_train_df.hist())
print(merge_train_df)

# + [markdown] id="z0DzrweBhzDx" colab_type="text"
# Notice that there are some rows with Nan value against Expense_Price column. It means expenses data is unavailable for those rows in the grouped Training data.
#
# Values in Expense_Price and Sales column are given in local currency of that particular country. We need to convert them to a common currency (like USD) and scale those values accordingly.

# + [markdown] id="FnbKagagiMqb" colab_type="text"
# Conversion rate for different currencies to USD as on 23rd July '18 are used as below:
#
# * arg_usd = 0.036 (Argentina)
# * bel_usd = 1.17 (Belgium)
# * col_usd = 0.00035 (Columbia)
# * den_usd = 0.16 (Denmark)
# * eng_usd = 1.31 (England)
# * fin_usd = 1.17 (Finland)

# + id="upddyGSQhN-D" colab_type="code" colab={}
def scaling_usd(merge_train_df):
    """Add USD-scaled expense and sales columns to *merge_train_df* in place.

    Adds ``Scaled_Expense_Price`` and ``Scaled_Sales``, obtained by
    multiplying ``Expense_Price`` and ``Sales`` with the country's USD
    conversion rate (as of 23 July 2018). Rows whose country is not in the
    rate table keep NaN in the new columns — the same end state the
    original row-by-row implementation produced. Returns the mutated
    dataframe for chaining.
    """
    # Local currency to USD conversion rates, 23 July 2018.
    usd_rates = {
        'Argentina': 0.036,
        'Belgium': 1.17,
        'Columbia': 0.00035,
        'Denmark': 0.16,
        'England': 1.31,
        'Finland': 1.17,
    }
    # Vectorized replacement of the original iterrows()/elif chain:
    # Series.map yields NaN for countries missing from the table, so
    # unmatched rows stay NaN exactly as before.
    rate = merge_train_df["Country"].map(usd_rates)
    merge_train_df["Scaled_Expense_Price"] = rate * merge_train_df["Expense_Price"]
    merge_train_df["Scaled_Sales"] = rate * merge_train_df["Sales"]
    return merge_train_df

# + id="EexugV6kjW-u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1136} outputId="8925f0a5-32ba-45ed-8ccc-7e5ebceceeb1"
merge_train_df = scaling_usd(merge_train_df)
print(merge_train_df.head())
print(merge_train_df.hist())
print(merge_train_df.describe())

# + [markdown] id="6E70Li4IkNQm" colab_type="text"
# After grouping and standardizing the training data, let's visualize the data now between each feature and target value (Scaled_Sales).

# + id="JRqNBFR9jfZY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="8041b35c-3367-4757-a8bd-952364e87112"
plt.scatter(merge_train_df["Scaled_Expense_Price"], merge_train_df["Scaled_Sales"])

# + [markdown] id="WxecVqYEk4DI" colab_type="text"
# Notice how all the data points are grouped in 3 different bins, first in which points are close to 0, second in which expense price < 2200 and points are aligned in a line having high slope, 3rd in which expense price > 5000 and points align in a line having moderate slope in above graph Scaled_Expense_Price v/s Scaled_Sales. Capture the data points placed in these 3 bins and plot them separately.
# # # + id="KEtZxHSUkVvN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="6e8dfdcf-bbd8-4468-ce61-2c4bee803f8d" plt.scatter(merge_train_df["Product_ID"], merge_train_df["Scaled_Sales"]) # + id="MqnJl0gbkWP1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="5e083852-4944-4ee0-96ed-fc0528bcca0e" plt.scatter(merge_train_df["Country"], merge_train_df["Scaled_Sales"]) # + [markdown] id="opTQj-VxbzqK" colab_type="text" # Notice above graph of country-wise Sales. We see that Scaled_Sales value for two countries, namely, Finland and Denmark are quite high, well above the rest. Something similar we also noticed in Expenses v/s Sales graph. # + id="-eo7vJFrkY0Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="e2f55261-4406-4618-eee4-1e48939699e2" plt.scatter(merge_train_df["Month"], merge_train_df["Scaled_Sales"]) # + [markdown] id="RUAZ6gQfgS8B" colab_type="text" # Parse the Date column in Holidays data to extract Year and Month. Convert Holidays data as per the grouped Training data by adding scaled Expenses and Sales value to visualise how holidays drive the Sales value. 
# + id="07vypg9mfhX2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 451} outputId="93d0e3d8-48cd-400c-e06e-7323ddf3d9ed" holidays = pd.DataFrame() year = [] month = [] for i in holidays_df["Date"]: date = i.split(',') year.append(int(date[0])) month.append(int(date[1])) holidays["Year"] = year holidays["Month"] = month holidays["Country"] = holidays_df["Country"] holidays = holidays.drop_duplicates() print(holidays) print(holidays.describe()) # + id="Wn8XM4qZgTjO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 885} outputId="035b29b1-de10-4b55-b44d-2b619c203d6f" drop_prod = merge_train_df.drop(columns="Product_ID") drop_prod = drop_prod.groupby(["Year","Month","Country"]).sum().apply(list).reset_index() merge_holidays = pd.merge(holidays, drop_prod, on=['Year','Month','Country'], how='inner') print(merge_holidays.describe()) print(merge_holidays) # + id="pTNoEpbWgTgD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="6bccaaf5-29c4-4617-8fe9-4e8067d0e41c" print(merge_holidays.shape[0]) print(merge_train_df.shape[0]) # + [markdown] id="AX4n74leiFk8" colab_type="text" # Notice that the number of rows in merged Holidays table has been reduced to 154 from 353 rows in Holidays table. It implies that Expenses and Sales data for as many as 299 data points in Holidays table are not available in merged Training data. It is possible that some data points absent in Training data are present in Test data. 
# + id="dnJQgygJgTcd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="1a531f43-f5ae-4466-c7d2-d86cc9e65baa" plt.scatter(merge_holidays["Scaled_Expense_Price"], merge_holidays["Scaled_Sales"]) # + id="0zZahHN4jvhz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="22765eb3-1fb9-4be6-83ae-afb5458543dc" plt.scatter(merge_train_df["Scaled_Expense_Price"], merge_train_df["Scaled_Sales"]) # + id="DlFl65eIlbvF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="41cd71f0-e390-4a80-9360-5f38c4d35ca6" plt.scatter(merge_holidays["Country"], merge_holidays["Scaled_Sales"]) # + [markdown] id="liEIShiwlXwf" colab_type="text" # We see that not much difference is there between plots for data points in Holidays and grouped Training data. So, it seems like nothing new can be learned from Holidays data. # + [markdown] id="bqVWufgwcYJu" colab_type="text" # Segregate training data into 3 bins based on their Expenses and Sales value as suggested above. Look for the country for which data in each bin belongs to. Compare the binned data with training data for that particular country. 
# + id="8_w5sLdbkahr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="686ae4bd-6713-4df5-ee36-32dd3cc494a5" data1 = merge_train_df.loc[(merge_train_df["Scaled_Sales"]>10000000.0) & (merge_train_df["Scaled_Expense_Price"]<2200.0)] print(data1) print(data1["Country"].unique()) plt.scatter(data1["Scaled_Expense_Price"], data1["Scaled_Sales"]) # + id="t3S1ExARl6mD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 526} outputId="5d649c1a-cea9-4000-e1da-9acfd1dde7c5" print(merge_train_df.loc[(merge_train_df["Country"]=='Finland')]) # + id="xuQIUPCJo4GR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="70378c0f-e231-43b1-9901-0bafe0cc8f8a" data2 = merge_train_df.loc[merge_train_df["Scaled_Expense_Price"]>5000.0] print(data2) print(data2["Country"].unique()) plt.scatter(data2["Scaled_Expense_Price"], data2["Scaled_Sales"]) # + id="eHQR5r8gpSH0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 526} outputId="42ae008d-4324-4d06-f180-e6508f164343" print(merge_train_df.loc[(merge_train_df["Country"]=='Denmark')]) # + id="stj6oASOr63b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 894} outputId="9f5dece7-e989-451b-bc9e-4fd1ffb4d2a0" data3 = merge_train_df.loc[(merge_train_df["Scaled_Sales"]<10000000.0) & (merge_train_df["Scaled_Expense_Price"]<2000.0)] print(data3) print(data3["Country"].unique()) plt.scatter(data3["Scaled_Expense_Price"], data3["Scaled_Sales"]) # + [markdown] id="0FkpLD_jdJvn" colab_type="text" # We make the following observation: # * Data points in 1st bin belong to Finland (points form line with high slope) # * Data points in 2nd bin belong to Denmark (points form line with moderate slope) # * Data points in 3rd bin belong to the remaining 4 countries, namely Argentina, Belgium, Columbia and England (points which are close to origin) # + [markdown] id="zEej5krpmyb4" colab_type="text" # Above 
visualisation and analysis show that two features, namely Country and Scaled_Expense_Prices mainly determine the target value of Scaled_Sales. # # Let's group the training data so as to keep these two features and sum the entries of Scaled_Sales for common rows. # + id="ovxVmCUEnirz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 696} outputId="b775ab77-ba8d-4a83-f0e5-796f937ebeee" grouped_df2 = merge_train_df.drop(columns=['Year','Month','Product_ID','Expense_Price','Sales']) grouped_df2 = grouped_df2.groupby(["Country","Scaled_Expense_Price"]).sum().apply(list) grouped_df2 = grouped_df2.reset_index() # flattening the grouped dataframe print(grouped_df2.head()) print(grouped_df2.describe()) print(grouped_df2.hist()) # + [markdown] id="OACcYN-UesO7" colab_type="text" # Let's now apply the same changes to Test data too. # + id="IJa7GZBRyNS6" colab_type="code" colab={} # Merge expenses_df and yds_test_df dataframes preserving all the rows that were present in yds_test_df merge_test_df = pd.merge(yds_test_df, expenses_df, on = ['Year','Month','Product_ID','Country'], how='left') # Sort the rows based on S_No as that's how yds_test_df is sorted merge_test_df = merge_test_df.sort_values('S_No') merge_test_df = merge_test_df.drop(columns='S_No') merge_test_df["Sales"] = 0.0 # + id="VUwbXy1VyfpY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 909} outputId="5a27812f-6e11-4367-b359-2b3ccf518947" print(merge_test_df.describe()) print(merge_test_df.hist()) print(merge_test_df) # + [markdown] id="QIQxf6lse9AF" colab_type="text" # Replace rows with nan values in Expense_Price column with its mean value in training data respective to the country. 
# + id="u0AiQx5oyjT4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="f04e7db5-470c-45f0-f48e-04a0ddcaf901" train_arg = merge_train_df.loc[merge_train_df['Country']=='Argentina'] mean_train_arg = train_arg["Expense_Price"].mean() train_col = merge_train_df.loc[merge_train_df['Country']=='Columbia'] mean_train_col = train_col["Expense_Price"].mean() print(mean_train_arg) print(mean_train_col) # + id="K-h6TM0vzDOO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 245} outputId="68ad494c-e3be-48d5-8ada-f93aa98f2cac" for i in zip(*np.where(pd.isnull(merge_test_df['Expense_Price']))): if merge_test_df['Country'].loc[i] == 'Argentina': merge_test_df['Expense_Price'].loc[i] = mean_train_arg elif merge_test_df['Country'].loc[i] == 'Columbia': merge_test_df['Expense_Price'].loc[i] = mean_train_col print(merge_test_df.tail()) # + id="34fQpCytzOdP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 526} outputId="1b39b2b6-caa4-46e9-db46-2143728e455f" merge_test_df = scaling_usd(merge_test_df) print(merge_test_df) # + id="V1fFgXDZz16g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 834} outputId="c6936915-6343-4867-de46-bb597ffcef22" test_grouped_df2 = merge_test_df.drop(columns=['Year','Month','Product_ID','Expense_Price','Sales']) # Don't do grouping of test data as that reduces the number of rows. #test_grouped_df2 = test_grouped_df2.groupby(["Country","Scaled_Expense_Price"]).sum().apply(list) #test_grouped_df2 = test_grouped_df2.reset_index() # flattening the grouped dataframe print(test_grouped_df2) print(test_grouped_df2.describe()) print(test_grouped_df2.hist()) # + id="d-HRnknJ0OPr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="7a2f2038-a75f-4112-b504-b04380f73fc3" # Check the final number of training and test data points. 
print(grouped_df2.shape[0])
print(test_grouped_df2.shape[0])

# + [markdown] id="y2UyWPnvmeS9" colab_type="text"
# As there is not much difference in Sales and Expenses value of rows in 3rd bin (points that are close to 0), we remove the Country name and instead label them all as 'Others' in Country column to simplify the model.

# + id="dyRr-MOf2Dt6" colab_type="code" colab={}
def country_others(dataframe):
    """Collapse the four similar low-sales countries into one 'Others' label.

    Argentina, Belgium, Columbia and England (the 3rd bin identified above)
    show near-identical Expense/Sales behaviour, so they are merged into a
    single category. Mutates *dataframe* in place and returns it, like the
    original implementation.
    """
    # Vectorized replacement for the original iterrows() loop with its
    # four-way `or` chain; .isin() performs the same membership test in one
    # pass over the column.
    low_sales_countries = ['Argentina', 'Belgium', 'Columbia', 'England']
    dataframe.loc[dataframe["Country"].isin(low_sales_countries), "Country"] = "Others"
    return dataframe

# + id="P_3wSpcYBixP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 583} outputId="fdda8318-b052-4c93-a5b7-1c994fff3d67"
grouped_df2 = country_others(grouped_df2)
binned_test_df2 = country_others(test_grouped_df2)
print(grouped_df2)
print(grouped_df2["Country"].unique())
print(binned_test_df2)
print(binned_test_df2["Country"].unique())

# + [markdown] id="yq01OwEun6eS" colab_type="text"
# As Country contains string values and not numeric, it's difficult to use the data for model training. Therefore, we replace Country column with 3 new ones, namely, 'Others', 'Denmark' and 'Finland' and add 1.0 to the column where country name belongs to that.
# + id="DjscoiEl1pcM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="e854446a-4e61-41f5-ca3a-43fd34b743fa" Country = list(grouped_df2["Country"].unique()) print(Country) # + id="voAePEOlCSnx" colab_type="code" colab={} def select_and_transform_features(source_df, expenses=False): selected_ex = pd.DataFrame() selected_ex["Scaled_Sales"] = source_df["Scaled_Sales"] if expenses: selected_ex["Scaled_Expense_Price"] = source_df["Scaled_Expense_Price"] for c in Country: selected_ex[c] = source_df["Country"].apply( lambda l: 1.0 if l == c else 0.0) return selected_ex # + id="EThVLrghDSMm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="9e061bc7-07d6-47d9-9841-db47659463da" selected_grouped_df2 = select_and_transform_features(grouped_df2, expenses=True) print(selected_grouped_df2) # + id="eVdVPhVcDrmo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="95378a23-0fa1-462a-b323-0d69569393e0" selected_test_grouped_df2 = select_and_transform_features(binned_test_df2, expenses=True) print(selected_test_grouped_df2) # + [markdown] id="zdP1f6d1o_z_" colab_type="text" # Randomize the Training data and split it into Training and Validation set. 
# + id="3vJd41FQ0lXz" colab_type="code" colab={}
# Randomize the rows
selected_grouped_df2 = selected_grouped_df2.reindex(np.random.permutation(selected_grouped_df2.index))

# + id="VKOvGhwF0zDK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 847} outputId="1d6a967c-8ce2-4ecf-be78-83f2a56d25cb"
# 70/30 train/validation split on the already-shuffled frame.
split_ratio = 0.7
total = len(grouped_df2)
split = int(split_ratio * total)
feature = selected_grouped_df2[["Others", "Denmark","Finland","Scaled_Expense_Price"]]
target = selected_grouped_df2[["Scaled_Sales"]]
train_ex = feature.head(split)
train_targets = target.head(split)
validation_ex = feature.tail(total-split)
validation_targets = target.tail(total-split)
print(train_ex.shape[0])
print(validation_targets.shape[0])
print(train_ex)
print(train_targets)
print(validation_targets)

# + [markdown] id="ncLkgi1bphXQ" colab_type="text"
# Below are functions for Machine Learning model training. We use the following:
# * Linear Regressor to fit a straight hyperplane to the data
# * Adagrad Optimizer with gradient clipping
# * L2 regularization
#
# To calculate the loss, we use Symmetric Mean Absolute Percent Error (SMAPE). SMAPE is the average of difference between forecast and actual values divided by the sum of their absolute values.

# + id="tcq2Rw1xQ73s" colab_type="code" colab={}
def construct_feature_columns(input_features):
    """Construct the TensorFlow Feature Columns.

    Args:
        input_features: The names of the numerical input features to use.
    Returns:
        A set of feature columns
    """
    return set([tf.feature_column.numeric_column(my_feature)
                for my_feature in input_features])


# + id="7DjkQHdiRoYt" colab_type="code" colab={}
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Trains a linear regression model.

    Args:
        features: pandas DataFrame of features
        targets: pandas DataFrame of targets
        batch_size: Size of batches to be passed to the model
        shuffle: True or False. Whether to shuffle the data.
        num_epochs: Number of epochs for which data should be repeated.
            None = repeat indefinitely
    Returns:
        Tuple of (features, labels) for next data batch
    """
    # Convert pandas data into a dict of np arrays.
    features = {key:np.array(value) for key,value in dict(features).items()}

    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)

    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(100)

    # Return the next batch of data (TF1-style one-shot iterator).
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels


# + id="-eXp5gkkCJ3T" colab_type="code" colab={}
import math

def smape_fast(y_pred, y_true):
    """Symmetric Mean Absolute Percentage Error on a 0-200 scale.

    Elements whose |actual| + |predicted| is zero are skipped in the sum,
    but the average still divides by the full length.

    NOTE(review): callers pass targets as a 2-D (n, 1) `.values` array;
    math.fabs accepts size-1 arrays, but a .ravel() upstream would be
    cleaner -- confirm before changing.
    """
    out = 0
    for i in range(y_true.shape[0]):
        a = math.fabs(y_true[i])
        b = math.fabs(y_pred[i])
        c = a + b
        if c == 0:
            continue
        out += math.fabs(a - b) / c
    out *= (200.0 / y_true.shape[0])
    return out


# + id="dO7I_TMXDCuf" colab_type="code" colab={}
def train_linear_regressor_model(
    learning_rate,
    steps,
    batch_size,
    regularization,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
    """Trains a linear regression model.

    In addition to training, this function also prints training progress
    information, as well as a plot of the training and validation loss
    over time.

    Args:
        learning_rate: learning rate for the Adagrad optimizer.
        steps: total number of training steps (split across 15 periods).
        batch_size: mini-batch size fed to the input function.
        regularization: L2 regularization strength.
        training_examples / training_targets: training split.
        validation_examples / validation_targets: validation split.
    Returns:
        A `LinearRegressor` object trained on the training data.
    """
    periods = 15
    steps_per_period = steps / periods

    # Create a linear regressor object with gradient clipping at norm 5.0.
    my_optimizer = tf.train.ProximalAdagradOptimizer(learning_rate=learning_rate, l2_regularization_strength=regularization) #Adagrad
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=construct_feature_columns(training_examples),
        optimizer=my_optimizer
    )

    # Create input functions.
    training_input_fn = lambda: my_input_fn(training_examples, training_targets, batch_size=batch_size)
    predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets, num_epochs=1, shuffle=False)
    predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets, num_epochs=1, shuffle=False)

    # Train the model, but do so inside a loop so that we can periodically
    # assess loss metrics.
    print("Training model...")
    print("SMAPE (on training data):")
    training_smape = []
    validation_smape = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute predictions.
        training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
        training_predictions = np.array([item['predictions'][0] for item in training_predictions])
        validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
        validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])

        # Compute training and validation loss.
        training_smape_error = smape_fast(training_predictions, np.array(training_targets.values))
        validation_smape_error = smape_fast(validation_predictions, np.array(validation_targets.values))
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, training_smape_error))
        # Add the loss metrics from this period to our list.
        training_smape.append(training_smape_error)
        validation_smape.append(validation_smape_error)
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.ylabel("SMAPE")
    plt.xlabel("Periods")
    plt.title("SMAPE Error vs. Periods")
    plt.tight_layout()
    plt.plot(training_smape, label="training")
    plt.plot(validation_smape, label="validation")
    plt.legend()

    return linear_regressor


# + [markdown] id="P2YJfBqjGLuh" colab_type="text"
# Below is the actual training function.
# There are 4 hyperparameters which you need to tune to get an optimized model with lowest SMAPE loss.
#
# Feel free to tweak these values.

# + id="XgEysbVN5O5r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 741} outputId="4c00cb9a-be04-44fd-ade1-d104113e0a3d"
# Tuned hyperparameters for the linear regressor.
linear_regressor = train_linear_regressor_model(
    learning_rate=40.0,
    steps=1000,
    batch_size=10,
    regularization=0.00006,
    training_examples=train_ex,
    training_targets=train_targets,
    validation_examples=validation_ex,
    validation_targets=validation_targets)

# + [markdown] id="XkphbcnIso6C" colab_type="text"
# Use the trained model to predict target values of Validation data and then, compare them with the actual target values. As before, we plot Scaled_Expense_Price v/s Scaled_Sales.

# + id="nMCaQvgiQ6E3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="2d82ae70-5fba-45c1-e114-b80ec2130aeb"
# Predict on the validation split and collect predictions into an array.
predict_validation_input_fn = lambda: my_input_fn(validation_ex, validation_targets, num_epochs=1, shuffle=False)
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])

# Side-by-side frame of feature, actual target and prediction.
predictions_df = pd.DataFrame()
predictions_df["Feature"] = validation_ex["Scaled_Expense_Price"]
predictions_df["Target"] = validation_targets
predictions_df["Prediction"] = validation_predictions
print(predictions_df.describe())

# + id="VQyboDvzRJsT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="40a3f19a-fa9a-4f1c-be23-0a0170e7dd6d"
# Actual (first series) vs predicted (second series) scatter plots.
plt.scatter(predictions_df["Feature"], predictions_df["Target"])
plt.scatter(predictions_df["Feature"], predictions_df["Prediction"])

# + [markdown] id="Dk3n90cds_bM" colab_type="text"
# We note that the trained Linear Regressor completely ignores the data points forming line with high slope (pertaining to Finland) and gives totally wrong prediction for these points. It performs well for data belonging to the other two bins.

# + [markdown] id="09Xo1ZhHtcIR" colab_type="text"
# Convert test data into appropriate format and predict the target value for it.

# + id="OqmAPIOHRKqQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 545} outputId="d1823416-4ecb-402e-bcf6-95e9beda35c2"
test_feature = selected_test_grouped_df2[["Others", "Denmark","Finland","Scaled_Expense_Price"]]
test_target = selected_test_grouped_df2[["Scaled_Sales"]]
print(test_feature)
print(test_target)

# + id="QrpppOE2R7QT" colab_type="code" colab={}
# Predict on the test split with the same input pipeline.
predict_test_input_fn = lambda: my_input_fn(test_feature, test_target, num_epochs=1, shuffle=False)
test_predictions = linear_regressor.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['predictions'][0] for item in test_predictions])

# + id="UCR_FWKBSE0o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="6039b778-2cba-4c79-e876-b1ec42910b8b"
print(test_feature.head())
print(test_predictions[0:5])
print(len(test_predictions))

# + [markdown] id="3ZwWSZZ_t22g" colab_type="text"
# Extract the predicted values for data points belonging to Finland. Compare that with the Sales value of data points belonging to Finland in training data.
# + id="wZu1pXF1STdc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="740fa1f1-7687-4a2d-ef88-1f1c39a25838"
print(test_feature.loc[(test_feature['Finland']==1.0)])

# + id="f20lBBXw_q-8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="d4951f32-8f3c-4a34-fa60-4fc5b786ee86"
print(test_predictions[96:105])

# + id="pLhjQCWoS5TT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="1977bcdd-bba4-4540-a2e7-d7c92e44aecd"
print(grouped_df2.loc[(grouped_df2['Country']=='Finland')])

# + [markdown] id="HcQwbrAvudBX" colab_type="text"
# We see that the predicted values for above data (of Finland) is off by one digit place. Its range is 1/10th of the range of values in training data.

# + [markdown] id="pJYFP7BOvWK0" colab_type="text"
# We convert back the predicted values from USD to their appropriate local currency.

# + id="UEbKOo9uvTFD" colab_type="code" colab={}
# Local-currency-to-USD conversion rates (same constants as the original
# per-branch values).
_USD_RATES = {
    'Argentina': 0.036,
    'Belgium': 1.17,
    'Columbia': 0.00035,
    'Denmark': 0.16,
    'England': 1.31,
    'Finland': 1.17,
}


def usd_to_local(dataframe_orig):
    """Convert Scaled_Expense_Price and Scaled_Sales from USD back to the
    local currency of each row's Country.

    Works on a copy so the caller's frame is not modified. Rows whose
    Country has no known rate (e.g. 'Others') are left unchanged, exactly
    as the original if/elif chain did.

    Improvement over the original: one vectorized multiplier column
    replaces the per-row iterrows()/.at branching.
    """
    dataframe = dataframe_orig.copy()
    # Multiplier is 1/rate for known countries, 1.0 for everything else.
    multiplier = (1.0 / dataframe["Country"].map(_USD_RATES)).fillna(1.0)
    dataframe["Scaled_Expense_Price"] = dataframe["Scaled_Expense_Price"] * multiplier
    dataframe["Scaled_Sales"] = dataframe["Scaled_Sales"] * multiplier
    return dataframe


# + id="ueZP3gv6yUhb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="1ba9459a-2ae3-49d8-9832-ecba6ed6c4bf"
# Overwrite the scaled sales with the model's predictions before converting.
test_grouped_df2["Scaled_Sales"] = test_predictions
print(test_grouped_df2)

# + id="5cWMcwMAv1pD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="b50e9ea5-c85c-45d0-bfcd-87ead5a86978"
test_local_currency = usd_to_local(test_grouped_df2)
print(test_local_currency)

# + [markdown] id="zIzewhLGQgi2" colab_type="text"
# Add the final Sales value column to the Test data.

# + id="ltXRTD4SyCuo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="b5444dfb-355a-4c62-9f06-c6c994e569ff"
yds_test_df['Sales'] = np.asarray(test_local_currency['Scaled_Sales'])
yds_test_df

# + [markdown] id="HomUaU-QE0Jw" colab_type="text"
# Finally, we convert the dataframe of Test data into a .csv file and download it.
#
#
# P.S. - Use Chrome to download the file using code below as it doesn't work in other browsers.

# + id="ud6igKD7DAdN" colab_type="code" colab={}
yds_test_df.to_csv('yds_submission2018.csv', index=False)
from google.colab import files
files.download('yds_submission2018.csv')

# + [markdown] id="ABdHvnfbDo2C" colab_type="text"
# If you have reached till the end, then pat yourself on the back. You're truly a Rockstar!
#
# #<-----End----->
ZS_challenge_code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Udacity P5: Vehicle detection and tracking

from libraries import *
# %matplotlib inline

# ## Basic functions (mostly obtained from Udacity class)

# ### Basic parameter definition

# HOG / feature-extraction hyperparameters shared by the functions below.
orient = 32
pix_per_cell = 16
cell_per_block = 2
hist_bins = 32
spatial_size = (16,16)

# ### Function to convert between color schemes

# OpenCV conversion codes for the supported target color spaces.
_COLOR_CODES = {
    'HSV': cv2.COLOR_RGB2HSV,
    'LUV': cv2.COLOR_RGB2LUV,
    'HLS': cv2.COLOR_RGB2HLS,
    'YUV': cv2.COLOR_RGB2YUV,
    'YCrCb': cv2.COLOR_RGB2YCrCb,
}


def convert_color(image, color_space='RGB'):
    """Return `image` converted from RGB to `color_space`.

    'RGB' (and, as a bug fix, any unrecognized space) yields an untouched
    copy -- the original raised NameError for unknown non-RGB spaces.
    """
    code = _COLOR_CODES.get(color_space)
    if code is None:
        return np.copy(image)
    return cv2.cvtColor(image, code)


# ### Function to extract features by HOG technique and predict the resulting

count = 0


def find_cars(img, color_space, ystart, ystop, scale, model, X_scaler, show_all=False):
    """Slide a 64x64 window over the y-band [ystart, ystop) of `img` and
    classify each window with `model`.

    HOG features are computed once per channel for the whole band and
    sub-sampled per window. Returns a list of ((x1, y1), (x2, y2)) boxes
    in original-image coordinates; with show_all=True every window is
    returned regardless of the prediction.
    """
    img = img.astype(np.float32)/255
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, color_space)
    if scale != 1:
        imshape = ctrans_tosearch.shape
        # np.int was removed in NumPy 1.24 -- use the builtin int instead.
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))

    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]

    # HOG block grid and sliding-window step counts over the search band.
    nxblocks = (ch1.shape[1] // pix_per_cell) - 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - 1
    window = 64
    nblocks_per_window = (window // pix_per_cell) - 1
    cells_per_step = 1
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    def _hog_channel(ch):
        # One whole-band HOG computation per channel (DRY'd from three
        # copy-pasted calls).
        return hog(ch, orientations=orient,
                   pixels_per_cell=(pix_per_cell, pix_per_cell),
                   cells_per_block=(cell_per_block, cell_per_block),
                   transform_sqrt=True, visualise=False, feature_vector=False)

    hog1 = _hog_channel(ch1)
    hog2 = _hog_channel(ch2)
    hog3 = _hog_channel(ch3)

    box_list = []
    for xb in range(nxsteps + 1):
        for yb in range(nysteps + 1):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step
            hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell

            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))
            spatial_features = cv2.resize(subimg, spatial_size).ravel()

            test_features = X_scaler.transform(np.hstack((spatial_features, hog_features)).reshape(1, -1))
            test_prediction = model.predict(test_features)

            if test_prediction == 1 or show_all:
                # Map window coordinates back to the original image scale.
                xbox_left = int(xleft * scale)
                ytop_draw = int(ytop * scale)
                win_draw = int(window * scale)
                box = ((xbox_left, ytop_draw + ystart),
                       (xbox_left + win_draw, ytop_draw + win_draw + ystart))
                box_list.append(box)
    return box_list


# ### Extract feature function used to extract features from an image using HOG

# ### function that creates the slide window

def slide_window(img, x_start_stop=None, y_start_stop=None,
                 xy_window=(64, 64), xy_overlap=(0.33, 0.2)):
    """Return the list of ((startx, starty), (endx, endy)) search windows.

    Bug fix: the original used mutable default lists [None, None] and
    wrote into them, so the bounds computed for the first image were
    silently reused for every later default call. Bounds are now copied
    before normalization.
    """
    x_start_stop = [None, None] if x_start_stop is None else list(x_start_stop)
    y_start_stop = [None, None] if y_start_stop is None else list(y_start_stop)
    # Default missing bounds to the full image extent.
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = img.shape[1]
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = img.shape[0]
    x_start_stop = [int(i) for i in x_start_stop]
    y_start_stop = [int(i) for i in y_start_stop]
    xspan = x_start_stop[1] - x_start_stop[0]
    yspan = y_start_stop[1] - y_start_stop[0]
    # Step size and overlap buffer in pixels (np.int -> builtin int).
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    nx_buffer = int(xy_window[0] * (xy_overlap[0]))
    ny_buffer = int(xy_window[1] * (xy_overlap[1]))
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
    window_list = []
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            startx = xs * nx_pix_per_step + x_start_stop[0]
            endx = startx + xy_window[0]
            starty = ys * ny_pix_per_step + y_start_stop[0]
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list


# ### Function for creating the boundary boxes around cars

def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6, make_copy=True):
    """Draw each ((x1, y1), (x2, y2)) box on `img`.

    Draws on a copy unless make_copy=False (the video pipeline draws in
    place on purpose).
    """
    if make_copy:
        imcopy = np.copy(img)
    else:
        imcopy = img
    for bbox in bboxes:
        cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
    return imcopy


# ### Class image_tracer

# +
class image_tracer(object):
    """Loads the vehicle / non-vehicle training images from disk."""

    def __init__(self, file_car, file_non_car):
        self.route_car = self.get_files(file_car)
        self.route_non_car = self.get_files(file_non_car)
        self.car_images = [mpimg.imread(i) for i in self.route_car]
        self.non_car_images = [mpimg.imread(i) for i in self.route_non_car]

    def get_files(self, route, lst_ext=('.jpeg', '.png', '.jpg')):
        """Recursively collect image paths under `route` with an allowed
        extension (tuple default avoids the mutable-default pitfall)."""
        assert(os.path.exists(route))
        result = []
        for directory, dirs, files in os.walk(route):
            result = result + [os.path.join(directory, f) for f in files
                               if os.path.splitext(f)[1] in lst_ext]
        return result

    def get_all_image(self):
        """Get a list of images and their labels"""
        X = np.vstack((self.car_images, self.non_car_images))
        y = np.hstack((np.ones(len(self.car_images)),
                       np.zeros(len(self.non_car_images))))
        return X, y

    @staticmethod
    def boxe_creator(image, num):
        """Draw the boxes in `num` on a copy of `image` and return it.

        Bug fix: the original referenced an undefined `new_image`
        (NameError) and was missing `self`; it now copies the input and
        is a staticmethod.
        """
        new_image = np.copy(image)
        for num1 in num:
            cv2.rectangle(new_image, num1[0], num1[1], (10, 10, 150), 4)
        return new_image


# Color space used for both training and detection.
color_space = 'YCrCb'
# -

# ### Class Model which divides set into train and validation set before performing training

class Model(object):
    """Linear SVM classifier plus its feature scaler, with pickle persistence."""

    def __init__(self, model_file=None):
        if model_file is None:
            self.model = LinearSVC()
            self.scalar = None
        else:
            with open(model_file, 'rb') as f:
                self.model, self.scalar = pickle.load(f)

    def save_model(self):
        """Persist the (model, scaler) pair to model.pkl."""
        with open('model.pkl', 'wb') as f:
            pickle.dump((self.model, self.scalar), f)

    def preprocess(self, images):
        """Extract the feature vector for every image."""
        features = [extract_features(i, color_space=color_space, hog_channel='ALL')
                    for i in images]
        return np.array(features).astype(np.float64)

    def train(self, X, y, test_size=0.3):
        """Given a list of images and their label, train the model.

        Returns the held-out accuracy (the original computed the score
        but silently discarded it).
        """
        features = self.preprocess(X)
        self.scalar = StandardScaler().fit(features)
        scaled_X = self.scalar.transform(features)
        stat = np.random.randint(0, 100)
        scaled_X, y = shuffle(scaled_X, y)
        X_train, X_test, y_train, y_test = train_test_split(
            scaled_X, y, test_size=test_size, random_state=stat)
        self.model.fit(X_train, y_train)
        score = self.model.score(X_test, y_test)
        print('Validation accuracy: %.4f' % score)
        return score

    def predict(self, feature):
        """Predict labels for already-scaled feature vectors."""
        return self.model.predict(feature)


# ### Class to search for cars in the window

class CarSearch(object):
    """Runs the sliding-window search and smooths detections over frames."""

    def __init__(self, model):
        self.model = model
        # Rolling history of per-frame heatmaps for video smoothing.
        self.history = collections.deque(maxlen=7)

    def __search_cars_in_image(self, img):
        box_list = []

        def search(ybeg, yend, scale):
            return find_cars(img, color_space, ybeg, yend, scale,
                             self.model, self.model.scalar, show_all=False)

        # Two passes: small windows near the horizon, larger ones lower down.
        box_list += search(360, 490, 1)
        box_list += search(390, 570, 2)
        return box_list

    def annotate_cars_in_image(self, img):
        """Single-image pipeline: draw every raw detection box."""
        box_list = self.__search_cars_in_image(img)
        cars = draw_boxes(img, box_list)
        return cars

    def __add_heat(self, box_list):
        heat = np.zeros((720, 1280))  # hardcode for now
        for box in box_list:
            heat[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
        self.history.append(heat)

    def generate_heat(self):
        """Sum the heatmap history; threshold once the history is full."""
        heat = np.sum(np.array(self.history), axis=0)
        if len(self.history) == self.history.maxlen:
            heat[heat <= 3] = 0
        return heat

    def annotate_cars_in_video(self, test_image):
        """Video pipeline: accumulate heat, label blobs, draw merged boxes
        in place on the frame."""
        box_list = self.__search_cars_in_image(test_image)
        self.__add_heat(box_list)
        heat = self.generate_heat()
        pixels, ncars = label(heat)
        new_boxes = []
        for i in range(1, ncars + 1):
            nonzero = (pixels == i).nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            box = ((np.min(nonzerox), np.min(nonzeroy)),
                   (np.max(nonzerox), np.max(nonzeroy)))
            new_boxes.append(box)
        draw_boxes(test_image, new_boxes, make_copy=False)
        return test_image


# +
def start_train(route_car, route_non_car):
    """Load training images, train the classifier and persist it to disk."""
    start_train_data = image_tracer(route_car, route_non_car)
    model = Model()
    X, y = start_train_data.get_all_image()
    model.train(X, y)
    model.save_model()
    return model


if __name__ == '__main__':
    start_train('vehicles', 'non-vehicles')
# -


def show_image(img, ax=None):
    """Display `img` on `ax` (or pyplot); grayscale if single-channel."""
    if ax is None:
        ax = plt
    if len(img.shape) == 2:
        ax.imshow(img, cmap='gray')
    else:
        ax.imshow(img)


def show_images(img_titles):
    """Display a row of images; items are images or (image, title) pairs."""
    cols = len(img_titles)
    f, axs = plt.subplots(1, cols, figsize=(24, 9))
    f.tight_layout()
    for i, image_title in enumerate(img_titles):
        if isinstance(image_title, (tuple, list)):
            img, title = image_title
        else:
            img = image_title
            title = "unspecified"
        axs[i].set_title(title, fontsize=30)
        show_image(img, axs[i])
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# ## HOG detailed

# Smaller HOG parameters for the visualisation below (these shadow the
# module-level settings used by the classifier).
orient=9
pix_per_cell=8
cell_per_block=2

spaces = ['RGB', 'HSV', 'LUV']
for s in spaces:
    test_image = mpimg.imread('vehicles/GTI_Right/image0775.png')
    conv = convert_color(test_image, s)
    # One HOG visualisation per channel; the loop replaces three
    # copy-pasted hog() calls with identical parameters.
    hog_images = []
    for ch in range(3):
        _, hog_img = hog(conv[:, :, ch], orientations=orient,
                         pixels_per_cell=(pix_per_cell, pix_per_cell),
                         cells_per_block=(cell_per_block, cell_per_block),
                         transform_sqrt=True, visualise=True, feature_vector=True)
        hog_images.append(hog_img)
    img1, img2, img3 = hog_images
    show_images([(test_image, 'original'), (conv, s),
                 (conv[:,:,0], 'channel 1'), (img1, 'ch1 HOG'),
                 (conv[:,:,1], 'channel 2'), (img2, 'ch2 HOG'),
                 (conv[:,:,2], 'channel 3'), (img3, 'ch3 HOG')])

# # Training

# +
model = Model('model.pkl')
test_image = read_image('test_images/test1.jpg')
search = CarSearch(model)
annotated = search.annotate_cars_in_image(test_image)
show_image(annotated)
# -

# # Video

# +
from moviepy.editor import VideoFileClip
from IPython.display import HTML

# Frame counter for save_image (a list so the function can mutate it
# without `nonlocal`/`global`).
count = [0]

def save_image(img):
    """Debug helper: dump the processed frame to disk and pass it through."""
    mpimg.imsave('output-images/image-%d.png'%count[0], img)
    count[0] += 1
    return img

def process_video(video_filename):
    """Annotate every frame of `video_filename` and write 'P5_<name>'."""
    model = Model('model.pkl')
    search = CarSearch(model)
    output = "P5_" + video_filename
    input_clip = VideoFileClip(video_filename)
    clip = input_clip.fl_image(search.annotate_cars_in_video)
    # %time clip.write_videofile(output, audio=False)

video_1 = 'test_video.mp4'
video_2 = 'video.mp4'
process_video(video_2)
# -

HTML("""
<video width="960" height="720" controls>
  <source src="{0}">
</video>
""".format("annotated_P5_project_video.mp4"))

# +
### Reference: https://github.com/lijunsong
Self_Driving_Car----Vehicle-Detection/vehicle-detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:
        """Splice list2 into list1 in place of nodes a..b (inclusive)."""
        # Walk to the node just before position a.
        prev = list1
        idx = 0
        while prev is not None and idx < a - 1:
            idx += 1
            prev = prev.next
        # Remember the start of the removed segment, then attach list2.
        cut = prev.next
        prev.next = list2
        # Advance to the tail of list2.
        tail = prev
        while tail.next is not None:
            tail = tail.next
        # Skip forward to node b within the removed segment.
        while idx < b - 1:
            cut = cut.next
            idx += 1
        # Reconnect the remainder of list1 after node b.
        tail.next = cut.next
        return list1
Anjani/Leetcode/Linked List/Merge In Between Linked Lists.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# dis.008

# Import libraries

# +
# Libraries for downloading data from remote server (may be ftp)
import requests
from urllib.request import urlopen
from contextlib import closing
import shutil

# Library for uploading/downloading data to/from S3
import boto3

# Libraries for handling data
import rasterio as rio
import numpy as np
# from netCDF4 import Dataset
# import pandas as pd
# import scipy

# Libraries for various helper functions
# from datetime import datetime
import os
import threading
import sys
from glob import glob
from matplotlib import pyplot
# %matplotlib inline
# -

# s3 tools

# +
s3_upload = boto3.client("s3")
s3_download = boto3.resource("s3")

# Destination bucket and keys for the original and edited rasters.
# NOTE(review): the folder says ene_019 but the file says ene_018 --
# confirm which dataset id is intended before re-running.
s3_bucket = "wri-public-data"
s3_folder = "resourcewatch/raster/ene_019_wind_energy_potential/"
s3_file = "ene_018_wind_energy_potential.tif"
s3_key_orig = s3_folder + s3_file
s3_key_edit = s3_key_orig[0:-4] + "_edit.tif"

# Exported so the shell magics below (%...%) can reference these paths.
os.environ["Zs3_key1"] = "s3://wri-public-data/" + s3_key_orig
os.environ["Zs3_key2"] = "s3://wri-public-data/" + s3_key_edit


class ProgressPercentage(object):
    # boto3 upload callback: prints cumulative progress for one file.
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # Lock because boto3 may invoke the callback from multiple threads.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write("\r%s %s / %s (%.2f%%)"%(
                self._filename, self._seen_so_far, self._size, percentage))
            sys.stdout.flush()
# -

# Define local file locations

# +
# NOTE(review): local folder/file refer to cit.018 (NO2 concentrations)
# while the notebook is titled dis.008 -- looks copy-pasted; verify paths.
local_folder = "C:/Users/Max81007/Desktop/Python/Resource_Watch/Raster/cit.018/"
file_name = "cit_018_monthly_no2_concentrations_in_atmosphere_201701.tif"
local_orig = local_folder + file_name
orig_extension_length = 4 #4 for each char in .tif
local_edit = local_orig[:-orig_extension_length] + "_edit.tif"
# -

# Print the raster profile of both files before editing.
files = [local_orig, local_edit]
for file in files:
    with rio.open(file, 'r') as src:
        profile = src.profile
        print(profile)

# Use rasterio to reproject and compress
os.getcwd()
os.chdir(local_folder)
# Exported for the gdalwarp shell magic below.
os.environ["local_orig"] =local_orig
os.environ["local_edit"] =local_edit
# !gdalwarp -overwrite -t_srs epsg:4326 -srcnodata none -co compress=lzw %local_orig% %local_edit%

# Re-print the profiles to confirm the reprojection/compression result.
files = [local_orig, local_edit]
for file in files:
    with rio.open(file, 'r') as src:
        profile = src.profile
        print(profile)

# Upload orig and edit files to s3

# +
# Original
s3_upload.upload_file(local_orig, s3_bucket, s3_key_orig,
                      Callback=ProgressPercentage(local_orig))

# Edit
s3_upload.upload_file(local_edit, s3_bucket, s3_key_edit,
                      Callback=ProgressPercentage(local_edit))
# -

# Copy the edited raster to Google Cloud Storage for Earth Engine ingest.
os.environ["Zgs_key"] = "gs://resource-watch-public/" + s3_key_orig
# !echo %Zs3_key2%
# !echo %Zgs_key%
# !gsutil cp %Zs3_key2% %Zgs_key%

# Visual sanity check of band 1 for both files.
with rio.open(local_orig) as src:
    data = src.read(indexes=1)
    pyplot.imshow(data)

with rio.open(local_edit) as src:
    data = src.read(indexes=1)
    pyplot.imshow(data)

# Ingest the raster into Earth Engine and check the upload task status.
os.environ["asset_id"] = "users/resourcewatch/cit_018_monthly_no2_concentrations_in_atmosphere_201701"
# !earthengine upload image --asset_id=%asset_id% %Zgs_key%

# !earthengine task info F7ZP3YOHXBMERJK2KRG4C5M2
.ipynb_checkpoints/dis.008-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import os

# Per-season scoring table (indexed by player id and year) and player
# birth dates used for the resampling example below.
scoring = pd.read_pickle(os.path.join('..', 'all_that_pickle', 'scoring_mi.pickle'))
birth_dates = pd.read_pickle(os.path.join('..', 'all_that_pickle', 'birth_dates.pickle'))

scoring.head()

wayne = scoring.loc['gretzwa01'] # filtering only for <NAME>

# Season totals of goals and assists.
# Bug fix: selecting GroupBy columns with a bare tuple ["G", "A"] was
# deprecated and removed in modern pandas -- pass a list of labels.
sums = wayne.groupby(level=0)[["G", "A"]].sum() # since we are grouping by index value (year), we use level

means = sums.rolling(3)["G"].mean()
means.head(10)

# %matplotlib inline
sums.assign(roll_mean = means).plot(); # the roll_mean is the smooth out version of the original values

# the second row is calculated using the just first two values.
# if we set min_periods=1, there will be no missing value
means = sums.rolling(3, min_periods=2)["G"].mean()
means.head(10)

means = sums.rolling(3).mean()
means.head()

means = sums.rolling(3, center=True).mean() # the mean of the last row is now one row up.
means.head()

means = sums.rolling(4, center=True).mean() # even window size. then the center row is assumed to be the one closer to the end of the window
means.head()

# win_type: type of filter that changes the importance of the values depending on their location in the window
triang_goals_sum = sums['G'].rolling(3, win_type='triang', center=True).sum()
sums.assign(triang = triang_goals_sum).head()

# Quarterly birth counts, then a 4-quarter (one year) rolling sum.
quarter_counts = birth_dates.resample('Q').count()
quarter_counts.rolling(4).sum().head(10)

# +
# Expanding windows are simpler than rolling windows as you do not specify the size as they produce values
# from the start of the column. These are useful when you'd like to see how value is changing with new observations
# coming in. Expanding methods are generic and you can pass any aggregation functions you want.
# We can do window operations on columns, by using axis= argument.
sums.expanding().sum().plot();
# -

sums.expanding().max().plot();

# same as:
sums.cummax().plot();

# Cumulative share of career goals scored up to each season.
totals = sums.sum()
total_part = sums['G'].expanding().aggregate(lambda x: x.sum()/totals['G']) # here x argument is a series
total_part.plot();
Expanding_And_Rolling_Windows.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/iambrookedrake/DS-Unit-2-Kaggle-Challenge/blob/master/module4-classification-metrics/Brooke_Drake_DSPT6_U2S2M4_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="0R-kBNTpCH6b" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 2, Module 4* # # --- # + [markdown] colab_type="text" id="nCc3XZEyG3XV" # # Classification Metrics # # ## Assignment # - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset. # - [ ] Plot a confusion matrix for your Tanzania Waterpumps model. # - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline). # - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_ # - [ ] Commit your notebook to your fork of the GitHub repo. # - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](http://archive.is/DelgE), by Lambda DS3 student <NAME>. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook. # # # ## Stretch Goals # # ### Reading # # - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. 
_"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_ # - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb) # - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415) # # # ### Doing # - [ ] Share visualizations in our Slack channel! # - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook) # - [ ] Stacking Ensemble. (See module 3 assignment notebook) # - [ ] More Categorical Encoding. 
# (See module 2 assignment notebook)

# +
import sys

# Resolve the data location: remote CSVs when running on Colab, a local
# clone of the repo otherwise.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
    # !pip install pandas-profiling==2.*

# If you're working locally:
else:
    DATA_PATH = '../data/'

# +
import pandas as pd

# Join the feature table to its labels (merge keys inferred from shared
# columns); the test set ships without labels.
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape

# +
# %matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier


def wrangle(X):
    """Wrangles train, validate, and test sets in the same way.

    Takes a raw waterpumps feature frame (must contain 'date_recorded',
    'construction_year', 'recorded_by', 'id', 'quantity_group', 'latitude',
    'longitude', 'gps_height', 'population') and returns a cleaned COPY;
    the caller's frame is not modified.
    """
    X = X.copy()  # avoid mutating the caller's frame / SettingWithCopy warnings

    # Expand the record date into year/month/day features, then drop it.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    X['year_recorded'] = X['date_recorded'].dt.year
    X['month_recorded'] = X['date_recorded'].dt.month
    X['day_recorded'] = X['date_recorded'].dt.day
    X = X.drop(columns='date_recorded')

    # Engineered feature: pump age at the time its status was recorded.
    X['years'] = X['year_recorded'] - X['construction_year']

    # 'recorded_by' carries no variance and 'id' is a row identifier: no signal.
    unusable_variance = ['recorded_by', 'id']
    X = X.drop(columns=unusable_variance)

    # 'quantity_group' duplicates another column.
    duplicate_columns = ['quantity_group']
    X = X.drop(columns=duplicate_columns)

    # The near-zero latitude value is used as a missing-data sentinel in this
    # dataset (Tanzania is well away from the equator at that precision).
    X['latitude'] = X['latitude'].replace(-2e-08, np.nan)

    # Zeros in these columns also mean "missing"; convert them to NaN so the
    # downstream imputer treats them as such.
    cols_with_zeros = ['construction_year', 'longitude', 'latitude',
                       'gps_height', 'population']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)

    return X


# +
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
                              stratify=train[target], random_state=42)

# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)

# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test

# Make pipeline!
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# +
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))

# +
### Plot a confusion matrix for your Tanzania Waterpumps model.
from sklearn.metrics import plot_confusion_matrix

# Raw-count confusion matrix for the 3-class model.
plot_confusion_matrix(pipeline, X_val, y_val,
                      values_format='.0f', xticks_rotation='vertical',
                      cmap='Blues')

# +
# Same matrix, row-normalized: each cell is the fraction of its true class.
plot_confusion_matrix(pipeline, X_val, y_val,
                      normalize='true', values_format='.2f',
                      xticks_rotation='vertical', cmap='Blues')

# +
# Recast as a binary problem: True = pump is NOT 'functional'
# (i.e. 'non functional' or 'functional needs repair').
y_train = y_train != 'functional'
y_val = y_val != 'functional'
y_train.value_counts(normalize=True)

# +
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))

# +
pipeline.predict_proba(X_val)

# +
y_pred = pipeline.predict(X_val)
y_pred

# +
# The class predictions are equivalent to thresholding P(True) at 0.5.
pipeline.predict_proba(X_val)[:, 1] > 0.5

# +
y_pred_proba = pipeline.predict_proba(X_val)[:, 1]
sns.distplot(y_pred_proba)

# +
# Visualize the 0.5 decision threshold against the predicted-probability
# distribution.
thres = 0.5
ax = sns.distplot(y_pred_proba)
ax.axvline(thres, color='red')
pd.Series(y_pred).value_counts()

# +
y_pred

# +
submission = sample_submission.copy()
submission['status_group'] = y_pred  # booleans from the binary model
submission.head()

# +
# Stringify the booleans so they can be replaced with label text below.
submission['status_group'] = submission['status_group'].astype(str)
submission

# +
# Map predicted True back to 'non functional' and False to 'functional'
# for the competition's expected label format.
submission.replace('True',value='non functional',inplace=True)
submission.replace('False',value='functional',inplace=True)
submission

# +
submission.to_csv('brooke_drake_submission.csv', index=False)

from google.colab import files
files.download('brooke_drake_submission.csv')
module4-classification-metrics/Brooke_Drake_DSPT6_U2S2M4_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Amazon Forecast: predicting time-series at scale # # Forecasting is used in a variety of applications and business use cases: For example, retailers need to forecast the sales of their products to decide how much stock they need by location, Manufacturers need to estimate the number of parts required at their factories to optimize their supply chain, Businesses need to estimate their flexible workforce needs, Utilities need to forecast electricity consumption needs in order to attain an efficient energy network, and # enterprises need to estimate their cloud infrastructure needs. # # <img src="../../common/images/amazon_forecast.png"> # # # Table of Contents # # * Step 0: [Setting up](#setup) # * Step 1: [Preparing the Datasets](#prepare) # * Step 2: [Importing the Data](#import) # * Step 2a: [Creating a Dataset Group](#create) # * Step 2b: [Creating a Target Dataset](#target) # * Step 2c: [Creating a Item Metadata Dataset](#metadata) # * Step 2d: [Update the Dataset Group](#update) # * Step 2e: [Creating a Target Time Series Dataset Import Job](#targetImport) # * Step 2f: [Creating a Item Metadata Dataset Import Job](#metadataImport) # * Step 3: [Choosing an Algorithm and Evaluating its Performance](#algo) # * Step 4: [Computing Error Metrics from Backtesting](#error) # * Step 5: [Creating a Forecast](#forecast) # * Step 6: [Querying the Forecasts](#query) # * Step 7: [Exporting the Forecasts](#export) # * Step 8: [Clearning up your Resources](#cleanup) # # # First let us setup Amazon Forecast<a class="anchor" id="setup"> # # This section sets up the permissions and relevant endpoints. 
# +
import sys
import os

import boto3
import pandas as pd
import matplotlib.pyplot as plt

# importing forecast notebook utility from notebooks/common directory
sys.path.insert( 0, os.path.abspath("../../common") )
import util

plt.rcParams['figure.figsize'] = (15.0, 5.0)
# -

# Configure the S3 bucket name and region name for this lesson.
#
# - If you don't have an S3 bucket, create it first on S3.
# - Although we have set the region to us-west-2 as a default value below,
#   you can choose any of the regions that the service is available in.
text_widget_bucket = util.create_text_widget( "bucket_name", "input your S3 bucket name" )
text_widget_region = util.create_text_widget( "region", "input region name.", default_value="us-west-2" )

# +
# Fail fast if the widgets above were left blank.
bucket_name = text_widget_bucket.value
assert bucket_name, "bucket_name not set."

region = text_widget_region.value
assert region, "region not set."
# -

# One boto3 session drives both the control-plane client ('forecast') and
# the query client ('forecastquery').
session = boto3.Session(region_name=region)
forecast = session.client(service_name='forecast')
forecast_query = session.client(service_name='forecastquery')

# Create the role to provide to Amazon Forecast.
role_name = "ForecastNotebookRole-ItemMetadata"
role_arn = util.get_or_create_iam_role( role_name = role_name )

# # Overview
#
# <img src="images/outline.png">
# <img src="../../common/images/forecast_workflow.png">
#
# The above figure summarizes the key workflow of using Forecast.

# # Step 1: Preparing the Datasets<a class="anchor" id="prepare">

# Target time series: (timestamp, value, item) triples read as strings.
time_series_df = pd.read_csv("../../common/data/item-demand-time.csv", dtype = object, names = ['timestamp', 'target_value', 'item'])
time_series_df.head(3)

# Item metadata is any categorical feature applicable to the target time
# series (e.g. brand, color, genre). Unlike related time series, item
# metadata is fixed in time: each item has exactly one associated value.
# In this example we use region as the item metadata.
item_metadata_df = pd.read_csv("data/item-meta.csv", dtype = object, names = ['item', 'region'])
item_metadata_df.head(3)

# The set of items in the item metadata must cover the target time series.
# Use "N\A" for items with no relevant value for an attribute, and avoid
# sparse categories (fold categories with <=10% of items into an "others"
# label).
target_items = set(time_series_df["item"].tolist())
metadata_items = set(item_metadata_df["item"].tolist())
print(target_items)
print(metadata_items)
assert len(target_items - metadata_items) == 0, "items do not match"

# The item sets match, so go ahead and upload the datasets to S3.
s3 = session.client('s3')

# +
key = "item_metadata_demo_small"

s3.upload_file(Filename="../../common/data/item-demand-time.csv", Bucket = bucket_name, Key = f"{key}/target.csv")
s3.upload_file(Filename="data/item-meta.csv", Bucket = bucket_name, Key = f"{key}/metadata.csv")
# -

# # Step 2. Importing the Data<a class="anchor" id="import">
# Now we are ready to import the datasets into the Forecast service.
# <img src="images/data_format.png">
# <img src="images/import1.png">
# <img src="images/import2.png">

project = "item_metadata_demo"
idx = 0  # so that we have a unique name

s3_data_path = f"s3://{bucket_name}/{key}"

# Below, we specify key input data and forecast parameters
freq = "H"                                # hourly observations
forecast_horizon = 24                     # predict 24 steps (one day) ahead
timestamp_format = "yyyy-MM-dd HH:mm:ss"
delimiter = ','

# ## Step 2a. Creating a Dataset Group<a class="anchor" id="create">
# First let's create a dataset group and then update it later to add our datasets.
dataset_group = f"{project}_gp_{idx}"
dataset_arns = []  # empty for now; datasets are attached in Step 2d
create_dataset_group_response = forecast.create_dataset_group(DatasetGroupName=dataset_group,
                                                              DatasetArns=dataset_arns,
                                                              Domain="CUSTOM")

print(f'Creating dataset group {dataset_group}')

dataset_group_arn = create_dataset_group_response['DatasetGroupArn']

forecast.describe_dataset_group(DatasetGroupArn=dataset_group_arn)

# ## Step 2b. Creating a Target Dataset<a class="anchor" id="target">
# The target time series is a required dataset to use the service.
ts_dataset_name = f"{project}_ts_{idx}"
print(ts_dataset_name)

# Schema attribute order must match the column order of the raw data file.
ts_schema_val = [{"AttributeName": "timestamp", "AttributeType": "timestamp"},
                 {"AttributeName": "target_value", "AttributeType": "float"},
                 {"AttributeName": "item_id", "AttributeType": "string"}]
ts_schema = {"Attributes": ts_schema_val}

print(f'Creating target dataset {ts_dataset_name}')
response = forecast.create_dataset(Domain="CUSTOM",
                                   DatasetType='TARGET_TIME_SERIES',
                                   DatasetName=ts_dataset_name,
                                   DataFrequency=freq,
                                   Schema=ts_schema
                                   )

ts_dataset_arn = response['DatasetArn']
forecast.describe_dataset(DatasetArn=ts_dataset_arn)

# ## Step 2c. Creating a Item Metadata Dataset<a class="anchor" id="metadata">
meta_dataset_name = f"{project}_meta_{idx}"
print(meta_dataset_name)

# Again, the attribute order must match the raw metadata CSV columns.
meta_schema_val = [{"AttributeName": "item_id", "AttributeType": "string"},
                   {"AttributeName": "region_id", "AttributeType": "string"}]
meta_schema = {"Attributes": meta_schema_val}

print(f'Creating item_metadata dataset {meta_dataset_name}')
response = forecast.create_dataset(Domain="CUSTOM",
                                   DatasetType='ITEM_METADATA',
                                   DatasetName=meta_dataset_name,
                                   Schema=meta_schema
                                   )

meta_dataset_arn = response['DatasetArn']
forecast.describe_dataset(DatasetArn = meta_dataset_arn)

# ## Step 2d. Updating the dataset group with the datasets we created<a class="anchor" id="update">
# You can have multiple datasets under the same dataset group; attach both.
dataset_arns = []
dataset_arns.append(ts_dataset_arn)
dataset_arns.append(meta_dataset_arn)
forecast.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=dataset_arns)
forecast.describe_dataset_group(DatasetGroupArn=dataset_group_arn)

# ## Step 2e.
# Creating a Target Time Series Dataset Import Job<a class="anchor" id="targetImport">

ts_s3_data_path = f"{s3_data_path}/target.csv"

ts_dataset_import_job_response = forecast.create_dataset_import_job(DatasetImportJobName=dataset_group,
                                                                    DatasetArn=ts_dataset_arn,
                                                                    DataSource= {
                                                                        "S3Config" : {
                                                                            "Path": ts_s3_data_path,
                                                                            "RoleArn": role_arn
                                                                        }
                                                                    },
                                                                    TimestampFormat=timestamp_format)

ts_dataset_import_job_arn=ts_dataset_import_job_response['DatasetImportJobArn']

# Imports run asynchronously; poll until the job finishes.
status = util.wait(lambda: forecast.describe_dataset_import_job(DatasetImportJobArn=ts_dataset_import_job_arn))
assert status

# ## Step 2f. Creating a Item Metadata Dataset Import Job<a class="anchor" id="metadataImport">

meta_s3_data_path = f"{s3_data_path}/metadata.csv"

# Note: no TimestampFormat here -- item metadata has no time dimension.
meta_dataset_import_job_response = forecast.create_dataset_import_job(DatasetImportJobName=dataset_group,
                                                                      DatasetArn=meta_dataset_arn,
                                                                      DataSource= {
                                                                          "S3Config" : {
                                                                              "Path": meta_s3_data_path,
                                                                              "RoleArn": role_arn
                                                                          }
                                                                      })

meta_dataset_import_job_arn=meta_dataset_import_job_response['DatasetImportJobArn']

status = util.wait(lambda: forecast.describe_dataset_import_job(DatasetImportJobArn=meta_dataset_import_job_arn))
assert status

# # Step 3. Choosing an algorithm and evaluating its performance<a class="anchor" id="algo">
#
# <img src="images/recipes.png">
# <img src="images/pred_details.png">
#
# Amazon Forecast offers classical per-series methods (ETS, ARIMA, Prophet,
# NPTS) and deep-learning approaches such as DeepAR+, which trains a single
# model jointly over all related time series and can outperform the classical
# methods when the dataset contains hundreds of related series with enough
# observations.
#
# ## How to evaluate a forecasting model?
#
# Forecasting models are evaluated via *backtesting*: the procedure must be
# causal, i.e. no future information may be used in the past.
# <img src="images/backtest.png">
#
# In this notebook, we will use the neural network based method, DeepAR+,
# which is the only algorithm in Amazon Forecast that supports categorical
# (item metadata) features.

algorithm_arn = 'arn:aws:forecast:::algorithm/'
algorithm = 'Deep_AR_Plus'
algorithm_arn_deep_ar_plus = algorithm_arn + algorithm
predictor_name_deep_ar = f'{project}_{algorithm.lower()}_{idx}'

print(f'[{predictor_name_deep_ar}] Creating predictor {predictor_name_deep_ar} ...')

create_predictor_response = forecast.create_predictor(PredictorName=predictor_name_deep_ar,
                                                      AlgorithmArn=algorithm_arn_deep_ar_plus,
                                                      ForecastHorizon=forecast_horizon,
                                                      PerformAutoML=False,
                                                      PerformHPO=False,
                                                      InputDataConfig= {"DatasetGroupArn": dataset_group_arn},
                                                      FeaturizationConfig= {"ForecastFrequency": freq}
                                                      )

predictor_arn_deep_ar = create_predictor_response['PredictorArn']

# Training is asynchronous; wait for the predictor to finish building.
status = util.wait(lambda: forecast.describe_predictor(PredictorArn=predictor_arn_deep_ar))
assert status

forecast.describe_predictor(PredictorArn=predictor_arn_deep_ar)

# # Step 4. Computing Error Metrics from Backtesting<a class="anchor" id="error">
# Query the backtest accuracy to judge the predictor quantitatively before
# deploying it and querying forecasts for business decisions.

print('Done creating predictor. Getting accuracy numbers for DeepAR+ ...')

error_metrics_deep_ar_plus = forecast.get_accuracy_metrics(PredictorArn=predictor_arn_deep_ar)
error_metrics_deep_ar_plus


def extract_summary_metrics(metric_response, predictor_name):
    # Flatten the first test window's weighted quantile losses into a frame
    # and tag every row with the predictor's display name for plotting.
    df = pd.DataFrame(metric_response['PredictorEvaluationResults']
                      [0]['TestWindows'][0]['Metrics']['WeightedQuantileLosses'])
    df['Predictor'] = predictor_name
    return df


# Bar plot of wQuantileLoss[tau] for tau = 0.1, 0.5, 0.9, corresponding to
# the p10, p50, p90 quantile forecasts of the predictor.
deep_ar_metrics = extract_summary_metrics(error_metrics_deep_ar_plus, "DeepAR")

pd.concat([deep_ar_metrics]) \
    .pivot(index='Quantile', columns='Predictor', values='LossValue').plot.bar();

# # Step 5. Creating a Forecast<a class="anchor" id="forecast">
# Next we re-train with the full dataset, and create the forecast.

print(f"Done fetching accuracy numbers. Creating forecaster for DeepAR+ ...")

forecast_name_deep_ar = f'{project}_deep_ar_plus_{idx}'

create_forecast_response_deep_ar = forecast.create_forecast(ForecastName=forecast_name_deep_ar,
                                                            PredictorArn=predictor_arn_deep_ar)
forecast_arn_deep_ar = create_forecast_response_deep_ar['ForecastArn']

status = util.wait(lambda: forecast.describe_forecast(ForecastArn=forecast_arn_deep_ar))
assert status

forecast.describe_forecast(ForecastArn=forecast_arn_deep_ar)

# # Step 6. Querying the Forecasts<a class="anchor" id="query">

item_id = 'client_12'

forecast_response_deep = forecast_query.query_forecast(
    ForecastArn=forecast_arn_deep_ar,
    Filters={"item_id": item_id})

# Load the observed series for the same item so the plot can overlay truth.
fname = f'../../common/data/item-demand-time.csv'
exact = util.load_exact_sol(fname, item_id, True)

# The figure below shows the p10/p50/p90 quantile forecasts: the p50 forecast
# is the solid black line, with the purple region denoting the confidence
# interval from the p10 to the p90 quantile forecasts.
# The vertical green dashed lines denote the prediction interval.

util.plot_forecasts(forecast_response_deep, exact)
plt.title("DeepAR Forecast")

# # Step 7. Exporting your Forecasts<a class="anchor" id="export">

forecast_export_name_deep_ar = f'{project}_forecast_export_deep_ar_plus_{idx}'
forecast_export_name_deep_ar_path = f"{s3_data_path}/{forecast_export_name_deep_ar}"

create_forecast_export_response_deep_ar = forecast.create_forecast_export_job(ForecastExportJobName=forecast_export_name_deep_ar,
                                                                              ForecastArn=forecast_arn_deep_ar,
                                                                              Destination={
                                                                                  "S3Config" : {
                                                                                      "Path": forecast_export_name_deep_ar_path,
                                                                                      "RoleArn": role_arn
                                                                                  }
                                                                              })

forecast_export_arn_deep_ar = create_forecast_export_response_deep_ar['ForecastExportJobArn']

status = util.wait(lambda: forecast.describe_forecast_export_job(ForecastExportJobArn = forecast_export_arn_deep_ar))
assert status

# # Step 8. Cleaning up your Resources<a class="anchor" id="cleanup">
# Tear resources down from the bottom up. All delete jobs, except for
# `delete_dataset_group`, are asynchronous, hence the helpful
# `wait_till_delete` wrapper. Resource limits are documented at
# https://docs.aws.amazon.com/forecast/latest/dg/limits.html

# Delete forecast export
util.wait_till_delete(lambda: forecast.delete_forecast_export_job(ForecastExportJobArn = forecast_export_arn_deep_ar))

# Delete forecast
util.wait_till_delete(lambda: forecast.delete_forecast(ForecastArn = forecast_arn_deep_ar))

# Delete predictor
util.wait_till_delete(lambda: forecast.delete_predictor(PredictorArn = predictor_arn_deep_ar))

# Delete the target time series and item_metadata dataset import jobs
util.wait_till_delete(lambda: forecast.delete_dataset_import_job(DatasetImportJobArn=ts_dataset_import_job_arn))
util.wait_till_delete(lambda: forecast.delete_dataset_import_job(DatasetImportJobArn=meta_dataset_import_job_arn))

# Delete the target time series and item_metadata datasets
util.wait_till_delete(lambda: forecast.delete_dataset(DatasetArn=ts_dataset_arn))
util.wait_till_delete(lambda: forecast.delete_dataset(DatasetArn=meta_dataset_arn))

# Delete dataset group (synchronous; no wait wrapper needed)
forecast.delete_dataset_group(DatasetGroupArn=dataset_group_arn)

# Delete IAM role
util.delete_iam_role( role_name )
notebooks/advanced/Incorporating_Item_Metadata_Dataset_to_your_Predictor/Incorporating_Item_Metadata_Dataset_to_your_Predictor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np from game_tools import NormalFormGame from logitdyn import LogitDynamics from __future__ import division coo_payoffs = np.array([[4,0], [3,2]]) g_coo = NormalFormGame(coo_payoffs) # ### test_simulate_LLN u = coo_payoffs beta = 1.0 P = np.zeros((2,2)) # I made a probabilistic choice matrix $P$ in a redundant way just in case. P[0,0] = np.exp(u[0,0] * beta) / (np.exp(u[0,0] * beta) + np.exp(u[1,0] * beta)) P[0,0] P[1,0] = np.exp(u[1,0] * beta) / (np.exp(u[0,0] * beta) + np.exp(u[1,0] * beta)) P[1,0] P[0,1] = np.exp(u[0,1] * beta) / (np.exp(u[0,1] * beta) + np.exp(u[1,1] * beta)) P[0,1] P[1,1] = np.exp(u[1,1] * beta) / (np.exp(u[0,1] * beta) + np.exp(u[1,1] * beta)) P[1,1] print P # $P[i,j]$ represents the probability that a player chooses an action $i$ provided that his opponent takes an action $j$. Q = np.zeros((4,4)) Q[0, 0] = P[0, 0] Q[0, 1] = 0.5 * P[1, 0] Q[0, 2] = 0.5 * P[1, 0] Q[0, 3] = 0 Q[1, 0] = 0.5 * P[0, 0] Q[1, 1] = 0.5 * P[0, 1] + 0.5 * P[1, 0] Q[1, 2] = 0 Q[1, 3] = 0.5 * P[1, 1] Q[2, 0] = 0.5 * P[0, 0] Q[2, 1] = 0 Q[2, 2] = 0.5 * P[1, 0] + 0.5 * P[0, 1] Q[2, 3] = 0.5 * P[1, 1] Q[3, 0] = 0 Q[3, 1] = 0.5 * P[0, 1] Q[3, 2] = 0.5 * P[0, 1] Q[3, 3] = P[1, 1] print Q # $Q$ is the transition probability matrix. The first row and column represent the state $(0,0)$, which means that player 1 takes action 0 and player 2 also takes action 0. The second ones represent $(0,1)$, the third ones represent $(1,0)$, and the last ones represent $(1,1)$. from quantecon.mc_tools import MarkovChain mc = MarkovChain(Q) mc.stationary_distributions[0] # I take 0.61029569 as the criterion for the test. 
ld = LogitDynamics(g_coo)

# New one (using replicate): draw `n` independent length-100 runs in one
# batched call and measure how often the terminal state is the profile (1, 1).
n = 1000
seq = ld.replicate(T=100, num_reps=n)
count = sum(1 for rep in range(n) if all(seq[rep, :] == [1, 1]))

ratio = count / n
ratio

# Old one: simulate 1000 separate sample paths and, for each path, count how
# many of its 100 periods are spent at (1, 1); then average the frequencies.
counts = np.zeros(1000)
for path_idx in range(1000):
    path = ld.simulate(ts_length=100)
    periods_at_11 = 0
    for t in range(100):
        if all(path[t, :] == [1, 1]):
            periods_at_11 += 1
    counts[path_idx] = periods_at_11

m = counts.mean() / 100
m
test_logitdyn.ipynb