code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Jupytext notebook: news-topic classification.  Each category file is read,
# its sentences are embedded with a pre-trained InferSent encoder, and a small
# SELU feed-forward network is trained on the resulting embeddings.

# +
import os
from io import open

"""Writing the data per file into a dictionary for which the key is the category of news"""

import unicodedata
import string

# Characters kept after Unicode folding: ASCII letters plus light punctuation.
all_letters = string.ascii_letters + ".,;'& "
n_letters = len(all_letters)

def unitoAscii(s):
    """Fold `s` to ASCII: NFD-normalise, drop combining marks ('Mn'),
    and keep only characters present in `all_letters`."""
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )

def readFile_byline(filename):
    """Read one category file and return its ASCII-folded lines.
    NOTE(review): the file handle is never closed; a `with` block would fix it."""
    line_of_news = open("news_data_test/"+filename , encoding = 'utf-8').read().split('\n')
    return [unitoAscii(line) for line in line_of_news]

# +
category_news = {}
all_categories = []
list_of_files = os.listdir("news_data_test/")

#Dictionary which maps every category of news to it's description
for _file in list_of_files:
    category = _file.split(".")[0]  # category name = file name without extension
    all_categories.append(category)
    news_descp = readFile_byline(_file)
    category_news[category] = news_descp

num_categories = len(all_categories)
print(num_categories)

count_of_news_category = {}
average_words_per_number_of_samples = []

#Counting news items per category of news
for key in category_news.keys():
    item = category_news.get(key)
    number_of_news_items = len(item)
    count_of_news_category[key] = number_of_news_items
    count_of_words = 0
    for sentence in item:
        count_of_words += len(sentence.split(" "))
    # Mean words per item for this category; list order follows dict iteration.
    average_words_per_number_of_samples.append(count_of_words/number_of_news_items)

# +
import random
import matplotlib
import matplotlib.pyplot as plt

# Bar chart: number of articles per category.
fig = plt.figure()
category = count_of_news_category.keys()
value = count_of_news_category.values()
plt.bar(category, value)
plt.xticks(rotation=90)
plt.tight_layout()
#plt.savefig('Number_of_articles_per_category.png')
plt.show()

# Bar chart: average words per article, per category.
fig1 = plt.figure()
category = count_of_news_category.keys()
value = average_words_per_number_of_samples
plt.bar(category, value)
plt.xticks(rotation=90)
plt.tight_layout()
#plt.savefig('Words_per_category.png')
plt.show()

# +
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
#print(device)

# Flatten the per-category dict into (category, news_text) pairs.
category_plus_news_list = []
for key in category_news.keys():
    for news_item in category_news[key]:
        category_plus_news_list.append((key, news_item))

# +
# Load the pre-trained InferSent sentence encoder (project-local module).
from InferSentModel import InferSent

model_version = 2
MODEL_PATH = "encoder/infersent%s.pkl" % model_version
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                'pool_type': 'max', 'dpout_model': 0.0, 'version': model_version}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
use_cuda = True
# NOTE(review): encoder goes to cuda:0 while `device` above prefers cuda:1 - confirm intended.
model = model.to(torch.device('cuda:0')) if use_cuda else model
W2V_PATH = 'fastText/crawl-300d-2M.vec'
model.set_w2v_path(W2V_PATH)
model.build_vocab_k_words(K=1000000)

# +
# Keep only items with at least 5 whitespace-separated tokens.
list_of_sentences = []
labels =[]
count_of_lables=[]
for pair in category_plus_news_list:
    label = pair[0]
    sentences = pair[1]
    if len(sentences.split(" ")) >= 5:
        list_of_sentences.append(sentences)
        labels.append(label)
#print(list_of_sentences[0:20])
print(len(list_of_sentences))
#print((labels[0:20]))
# -

# Encode every kept sentence into a fixed-size embedding.
#embeddings_business = model.encode(list_of_sentences_business, bsize=128, tokenize=False, verbose=True)
embeddings = model.encode(list_of_sentences, bsize=128, tokenize=False, verbose=True)
#embeddings_politics = model.encode(list_of_sentences_politics, bsize=128, tokenize=False, verbose=True)
#embeddings_religion = model.encode(list_of_sentences_religion, bsize=128, tokenize=False, verbose=True)
#embeddings_food = model.encode(list_of_sentences_food, bsize=128, tokenize=False, verbose=True)
#embeddings_home = model.encode(list_of_sentences_home, bsize=128, tokenize=False, verbose=True)
#print('nb sentences encoded : {0}'.format(len(embeddings_business)))
#print(embeddings_business.shape)
print('nb sentences encoded : {0}'.format(len(embeddings)))
print(embeddings.shape)

# +
# Map each category name to an integer class index.
label_list = list(category_news.keys())
index_class_map_dict={}
for idx, value in enumerate(label_list):
    index_class_map_dict[value]=idx
print(index_class_map_dict)

# +
# Pair each embedding with its integer class target.
embedded_sentences = []
for index, embedding in enumerate(embeddings):
    input_vector = embedding
    target_vector = labels[index]
    target_class = index_class_map_dict[target_vector]
    embedded_sentences.append((input_vector, target_class))
#print(embedded_sentences[0][1])

# +
'''label_list = list(category_news.keys())
for label in labels:
    for idx, value in enumerate(label_list):
        if label == value:
            labels[labels.index(label)] = idx'''

# +
# Reverse mapping kept for inspection: (index, category) pairs.
label_list = list(category_news.keys())
index_class_map=[]
for idx, value in enumerate(label_list):
    index_class_map.append((idx,value))
print(index_class_map)

# +
# NOTE(review): this cell repeats the previous one and then rebinds
# `index_class_map_dict` to the inverse (index -> category) mapping;
# `embedded_sentences` above was already built, so nothing downstream breaks.
label_list = list(category_news.keys())
index_class_map=[]
for idx, value in enumerate(label_list):
    index_class_map.append((idx,value))
print(index_class_map)

label_list = list(category_news.keys())
index_class_map_dict={}
for idx, value in enumerate(label_list):
    index_class_map_dict[idx]=value
print(index_class_map_dict.keys())
# -

def randomChoice(l):
    """Return a uniformly random element of sequence `l`."""
    return l[random.randint(0, len(l) - 1)]

# +
# Seed every RNG for reproducibility.
import torch
torch.manual_seed(0)
import numpy as np
np.random.seed(0)
import random
random.seed(0)
# -

import math

class SNNLinear(nn.Module):
    """Linear layer with self-normalising-network init: weights ~ N(0, 1/fan_in)."""
    def __init__(self, input_size, output_size):
        super().__init__()
        self.fc = nn.Linear(input_size, output_size)
        nn.init.normal_(self.fc.weight, std = math.sqrt(1/input_size))

    def forward(self, inputs):
        return self.fc(inputs)

# +
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

class NN(nn.Module):
    """ Simple NN architecture with 3 fully connected layers and SeLU activation """
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        fc1 = SNNLinear(input_size, hidden_size)
        fc2 = SNNLinear(hidden_size, hidden_size//2)
        fc3 = SNNLinear(hidden_size//2, output_size)
        self.net = nn.Sequential(fc1, nn.SELU(), nn.AlphaDropout(0.2),
                                 fc2, nn.SELU(), nn.AlphaDropout(0.2),
                                 fc3)
        #nn.SELU(), nn.AlphaDropout(0.5), fc4)

    def forward(self, input):
        return self.net(input)

print(device)
# -

def train(input_tensor, target_tensor, model, model_optimizer, criterion):
    """Run one optimisation step; returns (logits, scalar loss)."""
    model.train()
    model_optimizer.zero_grad()
    input_length = input_tensor.shape    # unused; kept from source
    target_length = target_tensor.shape  # unused; kept from source
    output = model(input_tensor)
    loss = criterion(output, target_tensor)
    loss.backward()
    model_optimizer.step()
    return output, loss.item()

def _eval(input_tensor, target_tensor, model, model_optimizer, criterion):
    """Forward pass without gradients; returns (logits, scalar loss)."""
    model.eval()
    with torch.no_grad():
        input_length = input_tensor.shape    # unused; kept from source
        target_length = target_tensor.shape  # unused; kept from source
        output = model(input_tensor)
        loss = criterion(output, target_tensor)
    return output, loss.item()

# +
import time
import math

def asMinutes(s):
    """Format a number of seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    """Return elapsed time and estimated remaining time given fraction done."""
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))

# +
# Hard-coded per-class sample counts used to build inverse-frequency weights.
# NOTE(review): `class_weights` is computed but the CrossEntropyLoss below is
# constructed without it - confirm whether class weighting was intended.
num_of_items_per_class = [5827, 15920, 3361, 2438, 6195, 6137, 2730, 2177, 1699,
                          6524, 1123, 986, 9826, 3941, 8664, 1362, 17768, 4693,
                          2229, 9601, 1121, 4172, 2109, 2611, 6076, 3421, 5008,
                          1321, 2078, 32241, 2533, 3459, 3404, 1376, 2067, 4463,
                          3641, 3821]
weights = []
for i in num_of_items_per_class:
    weights.append(1/i)
#print(len(weights))
class_weights = torch.FloatTensor(weights).to(device)

# +
class Dataset(torch.utils.data.Dataset):
    """Wraps (numpy embedding, int class) pairs as (float tensor, long tensor)."""
    def __init__(self, list_of_data):
        self.list_of_data = list_of_data

    # get one sample
    def __getitem__(self, idx):
        sample = self.list_of_data[idx]
        input_tensor = torch.from_numpy(sample[0]).float()
        target_tensor = torch.tensor(sample[1])
        return input_tensor, target_tensor

    def __len__(self):
        return len(self.list_of_data)

dataset = Dataset(embedded_sentences)
_input, _target = dataset.__getitem__(0)
print(_input.shape, _target.shape)

# 80/10/10 train/val/test split.
val_size = 0.1
test_size = 0.1
test_amount, val_amount = int(dataset.__len__() * test_size), int(dataset.__len__() * val_size)
print(test_amount, val_amount)
# NOTE(review): the split list is [train, test_amount, val_amount], so `val_set`
# actually receives `test_amount` items and vice versa (harmless while both are 10%).
train_set, val_set, test_set = torch.utils.data.random_split(dataset, [
    (dataset.__len__() - (test_amount + val_amount)),
    test_amount,
    val_amount
])

train_dataloader = torch.utils.data.DataLoader(
    train_set,
    batch_size=128,
    shuffle=True,
)
val_dataloader = torch.utils.data.DataLoader(
    val_set,
    batch_size=128,
    shuffle=False,
)
test_dataloader = torch.utils.data.DataLoader(
    test_set,
    batch_size=128,
    shuffle=False,
)

# +
from torch.optim.lr_scheduler import *

learning_rate = 1e-4

def trainIters(model, n_iters, embedded, val_embedded, print_every, learning_rate=learning_rate):
    """Train `model` for `n_iters` epochs over dataloader `embedded`,
    validating on `val_embedded` every 5 epochs, then save loss/accuracy plots.

    Args:
        model: the classifier to optimise (moved to `device` by the caller).
        n_iters: number of epochs.
        embedded: training DataLoader yielding (input, target) batches.
        val_embedded: validation DataLoader.
        print_every: global-step interval between training-loss printouts.
        learning_rate: Adam learning rate.
    """
    start = time.time()
    plot_losses_train = []
    plot_losses_val =[]
    print_loss_total_train = 0  # Reset every print_every
    plot_loss_total_train = 0  # Reset every plot_every
    print_loss_total_val = 0  # Reset every print_every
    plot_loss_total_val = 0  # Reset every plot_every
    print_acc_total_train = 0
    plot_acc_total_train = 0
    plot_acc_train = []
    print_acc_total_val = 0
    plot_acc_total_val = 0
    plot_acc_val = []
    train_epochs = []
    val_epochs = []
    #TODO: Try ADAM
    model_optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay = 1e-5)
    #TODO: Learning rate scheduler
    scheduler = StepLR(model_optimizer, step_size=50, gamma=0.1)
    criterion = nn.CrossEntropyLoss()
    total_steps = n_iters*len(embedded)
    for epoch in range(n_iters):
        for local_step, (_input, _target) in enumerate(embedded, 1):
            input_tensor = _input.to(device)
            #noise = torch.randn_like(input_tensor) * 1e-3
            #input_tensor = input_tensor + noise
            target_tensor = _target.to(device)
            output, loss = train(input_tensor, target_tensor, model, model_optimizer, criterion)
            # Batch-mean accuracy from the argmax over class logits.
            accuracy = (output.argmax(-1) == target_tensor).float().mean()
            print_loss_total_train += loss
            plot_loss_total_train += loss
            print_acc_total_train += accuracy
            plot_acc_total_train += accuracy
            global_step = epoch * len(embedded) + local_step
            if global_step % print_every == 0:
                print_loss_avg_train = print_loss_total_train / print_every
                print_loss_total_train = 0
                print('%s (%d %d%%) train_loss = %.4f' % (timeSince(start, global_step / total_steps),
                                                          global_step, global_step / total_steps * 100,
                                                          print_loss_avg_train))
        # Per-epoch averages over the number of batches.
        plot_loss_avg_train = plot_loss_total_train / len(embedded)
        plot_losses_train.append(plot_loss_avg_train)
        plot_avg_acc_train = plot_acc_total_train / len(embedded)
        plot_acc_train.append(plot_avg_acc_train)
        plot_loss_total_train = 0
        plot_acc_total_train = 0
        train_epochs.append(epoch)
        # Validate every 5th epoch (including epoch 0).
        if epoch % 5 == 0:
            for (_input, _target) in val_embedded:
                input_tensor = _input.to(device)
                target_tensor = _target.to(device)
                output, loss = _eval(input_tensor, target_tensor, model, model_optimizer, criterion)
                accuracy = (output.argmax(-1) == target_tensor).float().mean()
                print_loss_total_val += loss
                plot_loss_total_val += loss
                print_acc_total_val += accuracy
                plot_acc_total_val += accuracy
            print_loss_avg_val = print_loss_total_val / len(val_embedded)
            print_loss_total_val = 0
            print_avg_acc = print_acc_total_val/ len(val_embedded)
            print_acc_total_val = 0
            print('val_loss = %.4f acc = %.4f' % (print_loss_avg_val, print_avg_acc))
            plot_loss_avg_val = plot_loss_total_val / len(val_embedded)
            plot_avg_acc_val = plot_acc_total_val / len(val_embedded)
            plot_losses_val.append(plot_loss_avg_val)
            plot_acc_val.append(plot_avg_acc_val)
            plot_loss_total_val = 0
            plot_acc_total_val = 0
            val_epochs.append(epoch)
        scheduler.step()
    #print(train_epochs)
    #print(val_epochs)
    # Plot the collected curves; 'agg' backend so this works headless.
    import matplotlib.pyplot as plt
    plt.switch_backend('agg')
    import matplotlib.ticker as ticker
    import numpy as np
    plt.figure(figsize=(10,10))
    plt.plot(train_epochs, plot_losses_train, linewidth=5)
    plt.plot(val_epochs, plot_losses_val, linewidth=5)
    plt.legend(['train loss', 'val loss'], loc = 'upper right')
    plt.savefig('loss.png')
    plt.show()
    plt.figure(figsize=(10,10))
    plt.plot(val_epochs, plot_acc_val, linewidth=5)
    plt.savefig('acc.png')
    plt.legend(['val_acc'], loc = 'upper right')
    plt.show()

# +
hidden_size = 128
input_size = embeddings.shape[1]  # InferSent embedding dimensionality
output_size = 38                  # number of news categories
model = NN(input_size, hidden_size, output_size).to(device)
learning_rate = 1e-4
#Note : may need more epochs range [100-300]
epochs = 100

# +
from tqdm.notebook import tqdm

trainIters(model, epochs, train_dataloader, val_dataloader, print_every=5000, learning_rate = learning_rate)
# -
RNNModel/RNN_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Jupytext notebook: annotate a BDNF phylogenetic tree.  Leaf accessions are
# resolved to NCBI taxon IDs and lineages via Entrez/ete3, the lineages are
# classified into mammalian orders, and leaf labels are rewritten for iTOL.

# +
# Input a newick file
# Get just the tip names
# in this case, tip names are just NCBI IDs
# From NCBI get the species name
# Replace the NCBI ID in the newick with the Species name
# Save file.
# -

# Imports
from ete3 import Tree, faces, AttrFace, TreeStyle, NodeStyle
import sys;
#from ete3 import Tree;
import argparse
from Bio import Entrez
import time
import json
import csv
from collections import Counter
from ete3 import NCBITaxa
import pandas as pd

# +
# Declares, load newick string
#DATA_DICT = {}
Entrez.email = "<EMAIL>"
TREE_FILE = "../../results/BDNF_Codons.fa_CODON_AWARE_ALN.fasta.treefile"

with open(TREE_FILE, "r") as fh:
    TREE_NEWICK = fh.read()

#TREE_NEWICK = ""
TREE_NEWICK

# +
# helper functions
# The following looks up NCBI Taxon ID from Accession IDs
# Then uses that Taxon ID to get the complete taxnomic lineage of of that species.
DATA_DICT = {}
#Entrez.email = "<EMAIL>"

def main(TREE_NEWICK):
    """Populate global DATA_DICT with one record per tree leaf:
    branch name, accession, NCBI taxon ID, lineage names and record title."""
    global DATA_DICT
    t = Tree(TREE_NEWICK, format=1)
    count = 1
    for n, BRANCH_NAME in enumerate(t.get_leaf_names()):
        #BRANCH_NAME = ACCESSION
        #ACCESSION = ACCESSION.split("-")[0]
        # Leaf names look like XX_YYYYYY_V...: rebuild "XX_YYYYYY.V" accession.
        s = BRANCH_NAME.split("_")
        ACCESSION = "_".join([s[0], s[1]]) + "." + s[2]
        print(n+1, "# Checking ACCESION:", ACCESSION)
        # Skip accessions already resolved in a previous run of this loop.
        skip = False
        for i in DATA_DICT.keys():
            if ACCESSION == DATA_DICT[i]["ACCESSION"]:
                skip = True
                break
            #end if
        #end for
        if skip == True:
            count += 1
            print("# Skipping:", ACCESSION)
            continue
        #end if
        # NOTE(review): bare except - retries once after a 5 s sleep, but any
        # second failure propagates out of this try with no handler context.
        try:
            handle = Entrez.esummary(db="nucleotide", id=ACCESSION, rettype="gb", retmode="text", retmax=1)
            records = Entrez.parse(handle)
        except:
            print("# error, sleeping")
            time.sleep(5)
            handle = Entrez.esummary(db="nucleotide", id=ACCESSION, rettype="gb", retmode="text", retmax=1)
            records = Entrez.parse(handle)
        #end try
        #print("# Number of records returned:", sum(1 for r in records))
        try:
            for record in records:
                TAXON_ID = record["TaxId"]
                print(count, "Processing:", str(ACCESSION), str(TAXON_ID))
                #break
                ncbi = NCBITaxa()
                lineage = ncbi.get_lineage(TAXON_ID)
                #print(lineage)
                names = ncbi.get_taxid_translator(lineage)
                DATA_DICT[str(count)] = {"BRANCH_NAME": BRANCH_NAME,
                                         "ACCESSION":ACCESSION,
                                         "TAXON_ID": TAXON_ID,
                                         "LINEAGE": [names[taxid] for taxid in lineage],
                                         "TITLE":record["Title"]}
                count += 1
            #end inner for
            # NOTE(review): `handle.close` lacks () so the handle is NOT closed.
            handle.close
        except:
            print("#error repeating")
            break
            # NOTE(review): unreachable after `break` - the recursive retry never runs.
            main(TREE_NEWICK)
        #end try
    #end outer for
#end method

main(TREE_NEWICK)
print("#done")

# +
#Main subroutine

# +
import pandas as pd

# One row per resolved leaf; index is the string counter from main().
df = pd.DataFrame.from_dict(DATA_DICT, orient="index")
df
#df.to_csv("BDNF_lineages.csv")
#df.to_csv("../results/BDNF_lineage.csv")
# -

df.to_csv("BDNF_lineages.csv")

# ## Orders

# +
# 19 Mammalian Orders
# https://culter.colorado.edu/~kittel/WEcol_Handouts/MammalOrders_Sheryn06.pdf
# https://en.wikipedia.org/wiki/Mammal_classification
#https://byjus.com/biology/mammalia-diversity-in-living-organisms/
# Eutheria
# Metatheria
# Prototheria
# https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=40674
Mammalia_Orders = ["Monotremata", "Afrotheria", "Boreoeutheria", "Litopterna",
                   "Notoungulata", "Xenarthra", "Dasyuromorphia", "Didelphimorphia",
                   "Diprotodontia", "Microbiotheria", "Notoryctemorphia",
                   "Paucituberculata", "Peramelemorphia"]

"""
Mammalia_Orders = ["Artiodactyla", "Carnivora", "Cetacea", "Chiroptera",
"Dermoptera", "Edentata", "Hyracoidae", "Insectivora", "Lagomorpha",
"Marsupialia", "Monotremata", "Perissodactyla", "Pholidata", "Pinnipedia",
"Primates", "Proboscidea", "Rodentia", "Sirenia", "Tubulidentata"]
"""

# +
# Tag each row with the first matching coarse order; "NaN" if none matches.
order_list = []
for n,item in enumerate(df["LINEAGE"]):
    #print(n,type(item))
    count = 0
    for classification in item:
        #print(classification)
        if classification in Mammalia_Orders:
            count += 1
            order_list.append(classification)
        if count > 1:
            print(n+1, item, classification)
        #end if
    #end for
    if count == 0:
        order_list.append("NaN")
        print(n+1, item, classification)
#end for
df["Order"] = order_list
# -

df.Order.value_counts()

df.Order.nunique()

df.Order.shape

df.Order.value_counts().plot.pie(startangle=0, legend=False, shadow=True, subplots=True, figsize=(9, 9), autopct='%1.1f%%')

# ## Closer Order

# Finer-grained groups (suborders/families), grouped by the coarse order they end.
Mammalia_Orders_Closer = ["Dermoptera", "Glires", "Primates", "Scandentia",
                          "Artiodactyla", "Carnivora", "Chiroptera", "Eulipotyphla",
                          "Perissodactyla", "Pholidota", #ends boroeutheria
                          "Ornithorhynchidae", "Tachyglossidae", # ends Monotremata
                          "Chrysochloridae", "Hyracoidea", "Macroscelidea",
                          "Proboscidea", "Sirenia", "Tenrecidae", "Tubulidentata", # ends Afrotheria
                          "Macraucheniidae", #ends Litopterna
                          "Toxodontidae", # ends Notoungulata
                          "Pilosa", "Cingulata", # ends Xenarthra
                          "Dasyuridae", "Myrmecobiidae", "Thylacinidae", #ends Dasyuromorphia
                          "Didelphidae", # ends Didelphimorphia
                          "Acrobatidae", "Burramyidae", "Macropodidae", "Petauridae",
                          "Phalangeridae", "Phascolarctidae", "Potoroidae",
                          "Pseudocheiridae", "Tarsipedidae", "Vombatidae", # ends Diprotodontia
                          "Microbiotheriidae", #ends Microbiotheria
                          "Notoryctidae", #ends Notoryctemorphia
                          "Caenolestidae",# ends Paucituberculata
                          "Peramelidae", "Peroryctidae" #ends Peramelemorphia
                          ]

# +
# Re-tag each row using the finer-grained groups (same pattern as above).
order_list = []
for n, item in enumerate(df["LINEAGE"]):
    #print(n,type(item))
    count = 0
    for classification in item:
        #print("## TRYING:", n+1, classification)
        #print(classification)
        if classification in Mammalia_Orders_Closer:
            count += 1
            order_list.append(classification)
            #print("## MATCH:", n+1, item, classification)
        if count > 1:
            print("## DOUBLE:", n+1, item, classification)
        #end if
    #end for
    if count == 0:
        #print("## NO MATCH:", n+1, item, classification)
        order_list.append("NaN")
        print()
#end for
df["Order"] = order_list
df.Order.value_counts().plot.pie(startangle=0, legend=False, shadow=True, subplots=True, figsize=(9, 9), autopct='%1.1f%%')
# -

df.Order.value_counts()

import numpy as np

# Write the branch names of every order to "<Order>.txt".
for item in df.Order.unique():
    print(item)
    df3 = df[df["Order"] == item]
    #print(df3.shape)
    output = "" + item + ".txt"
    # get the taxa names
    np.savetxt(output, df3.BRANCH_NAME.values, fmt='%s')

# +
# save it
#import numpy as np
#df3 = df[df[]]
#np.savetxt("../results/suborder.txt", df.Order.values, fmt='%s')
# -

# ## Break down Primates

def classify(data, GROUPS):
    """For each lineage in `data`, return the first name found in `GROUPS`
    ("NaN" if none); prints a warning when a lineage matches more than once."""
    order_list = []
    for n, item in enumerate(data):
        count = 0
        for classification in item:
            if classification in GROUPS:
                count += 1
                order_list.append(classification)
                #print("## MATCH:", n+1, item, classification)
            if count > 1:
                print("## DOUBLE:", n+1, item, classification)
            #end if
        #end for
        if count == 0:
            #print("## NO MATCH:", n+1, item, classification)
            order_list.append("NaN")
            print()
        #end if
    #end for
    return order_list
#end method

# +
# NOTE(review): `df2` is a slice of `df`; assigning a column triggers pandas'
# SettingWithCopy warning - use .copy() if the warning matters.
df2 = df[df["Order"] == "Primates"]
Primates_SubOrder = ["Haplorrhini", "Strepsirrhini"]
df2["SubOrder"] = classify(df2["LINEAGE"], Primates_SubOrder)
#df2["SubOrder"].value_counts().plot()
df2["SubOrder"].value_counts().plot.pie()

# +
df2 = df[df["Order"] == "Primates"]
Primates_SubOrder = ["Simiiformes", "Tarsiiformes", "Chiromyiformes", "Lemuriformes", "Lorisiformes"]
df2["SubOrder"] = classify(df2["LINEAGE"], Primates_SubOrder)
#df2["SubOrder"].value_counts().plot()
df2["SubOrder"].value_counts().plot.pie()

# +
df2 = df[df["Order"] == "Primates"]
Primates_SubOrder = ["Catarrhini", "Platyrrhini", "Tarsiidae", "Daubentoniidae",
                     "Cheirogaleidae", "Indriidae", "Lemuridae", "Lepilemuridae",
                     "Paleopropithecidae", "Galagidae", "Lorisidae"]
df2["SubOrder"] = classify(df2["LINEAGE"], Primates_SubOrder)
#df2["SubOrder"].value_counts().plot()
df2["SubOrder"].value_counts().plot.pie()
# -

# ## Tree manipulation, changing the leaf filenames

# #### Mostly, change accession to Taxon ID. Useful for iTOL.

TREE_NEWICK_TAXONIDs = TREE_NEWICK
TREE_NEWICK_TAXONIDs

# +
# Replace each accession in the newick string with its NCBI taxon ID.
t = Tree(TREE_NEWICK_TAXONIDs);
count = 1
for ACCESSION in t.get_leaf_names():
    #print("# Checking", ACCESSION)
    for i in DATA_DICT.keys():
        #print(i, DATA_DICT[i]["ACCESSION"])
        if ACCESSION == DATA_DICT[i]["ACCESSION"]:
            #print("\t", DATA_DICT[i]["TAXON_ID"])
            TREE_NEWICK_TAXONIDs = TREE_NEWICK_TAXONIDs.replace(str(ACCESSION), str(DATA_DICT[i]["TAXON_ID"]))
            if ACCESSION in TREE_NEWICK_TAXONIDs: print("\t", "match found")
        #end if
    #end for
#end outer for
print("# Done")
# -

type(TREE_NEWICK_TAXONIDs)

TREE_NEWICK_TAXONIDs

# +
# Replace each accession with the species name (last lineage entry).
TREE_NEWICK_SpeciesNames = TREE_NEWICK
t = Tree(TREE_NEWICK_SpeciesNames);
count = 1
for ACCESSION in t.get_leaf_names():
    #print("# Checking", ACCESSION)
    for i in DATA_DICT.keys():
        #print(i, DATA_DICT[i]["ACCESSION"])
        if ACCESSION == DATA_DICT[i]["ACCESSION"]:
            #print("\t", DATA_DICT[i]["LINEAGE"][-1])
            TREE_NEWICK_SpeciesNames = TREE_NEWICK_SpeciesNames.replace(str(ACCESSION), str(DATA_DICT[i]["LINEAGE"][-1]))
            #if ACCESSION in TREE_NEWICK_TAXONIDs: print("\t", "match found")
        #end if
    #end for
#end outer for
print(TREE_NEWICK_SpeciesNames)

# +
# Replace each accession with "SpeciesName{higher-rank}" (6th-from-last lineage
# entry), then strip spaces so the result stays valid newick.
TREE_NEWICK_SpeciesNames_withAnnotation = TREE_NEWICK
t = Tree(TREE_NEWICK_SpeciesNames_withAnnotation);
count = 1
for ACCESSION in t.get_leaf_names():
    for i in DATA_DICT.keys():
        if ACCESSION == DATA_DICT[i]["ACCESSION"]:
            LINEAGE = DATA_DICT[i]["LINEAGE"][-6]
            #TREE_NEWICK_SpeciesNames_withAnnotation = TREE_NEWICK_SpeciesNames.replace(str(ACCESSION), str(DATA_DICT[i]["LINEAGE"][-1]))
            TREE_NEWICK_SpeciesNames_withAnnotation = TREE_NEWICK_SpeciesNames_withAnnotation.replace(str(ACCESSION), str(DATA_DICT[i]["LINEAGE"][-1]) + "{" + LINEAGE +"}")
        #end if
    #end for
#end outer for

TREE_NEWICK_SpeciesNames_withAnnotation = TREE_NEWICK_SpeciesNames_withAnnotation.replace(" ", "")
print(TREE_NEWICK_SpeciesNames_withAnnotation)
# -
scripts/posthocs/EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Geometric Distribution
# ***
# ## Definition
# >The Geometric distribution is a discrete distribution and gives the probability that the first occurrence of success requires k independent trials [a.k.a. Bernoulli trials], each with success probability p. $ ^{[1]}$.
#
# ## Formula
# The probability mass function of a Geometric distributed random variable is defined as:
# $$ Geom(k|p) = (1-p)^{k-1}p $$
# where $p$ denotes the probability of success in a Bernoulli trial.

# +
# IMPORTS
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.style as style
from IPython.core.display import HTML

# PLOTTING CONFIG
# %matplotlib inline
style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (14, 7)
HTML("""
<style>
.output_png {
    display: table-cell;
    text-align: center;
    vertical-align: center;
}
</style>
""")
plt.figure(dpi=100)

# PDF (pmf scaled by its maximum so it shares the axis with the cdf)
plt.bar(x=np.arange(10),
        height=(stats.geom.pmf(np.arange(10), p=.5)/np.max(stats.geom.pmf(np.arange(10), p=.5))),
        width=.75,
        alpha=0.75
       )

# CDF
plt.plot(np.arange(10),
         stats.geom.cdf(np.arange(10), p=.5),
         color="#fc4f30",
        )

# LEGEND
plt.text(x=3.5, y=.3, s="pmf (normed)", alpha=.75, weight="bold", color="#008fd5")
plt.text(x=2.5, y=.7, s="cdf", alpha=.75, weight="bold", color="#fc4f30")

# TICKS
plt.xticks(range(11))
plt.tick_params(axis = 'both', which = 'major', labelsize = 18)
plt.axhline(y = 0.005, color = 'black', linewidth = 1.3, alpha = .7)

# TITLE, SUBTITLE & FOOTER
plt.text(x = -1.5, y = 1.25, s = "Geometric Distribution - Overview",
         fontsize = 26, weight = 'bold', alpha = .75)
plt.text(x = -1.5, y = 1.1,
         s = 'Depicted below are the normed probability mass function (pmf) and the cumulative density\nfunction (cdf) of a Geometric distributed random variable $ y \sim Geom(p) $, given parameter $p =0.5 $.',
         fontsize = 19, alpha = .85)
plt.text(x = -1.5,y = -0.125,
         s = 'Geometric',
         fontsize = 14, color = '#f0f0f0', backgroundcolor = 'grey');
# -

# ***
# ## Parameters

# +
# IMPORTS
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.style as style
from IPython.core.display import HTML

# PLOTTING CONFIG
# %matplotlib inline
style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (14, 7)
HTML("""
<style>
.output_png {
    display: table-cell;
    text-align: center;
    vertical-align: center;
}
</style>
""")
plt.figure(dpi=100)

# PDF P = .2
plt.scatter(np.arange(11),
            (stats.geom.pmf(np.arange(11), p=.2)),
            alpha=0.75,
            s=100
       )
plt.plot(np.arange(11),
         (stats.geom.pmf(np.arange(11), p=.2)),
         alpha=0.75,
        )

# PDF P = .5
plt.scatter(np.arange(11),
            (stats.geom.pmf(np.arange(11), p=.5)),
            alpha=0.75,
            s=100
       )
plt.plot(np.arange(11),
         (stats.geom.pmf(np.arange(11), p=.5)),
         alpha=0.75,
        )

# PDF P = .9
plt.scatter(np.arange(11),
            (stats.geom.pmf(np.arange(11), p=.9)),
            alpha=0.75,
            s=100
       )
plt.plot(np.arange(11),
         (stats.geom.pmf(np.arange(11), p=.9)),
         alpha=0.75,
        )

# LEGEND
plt.text(x=4.25, y=.15, s="$p = 0.2$", alpha=.75, weight="bold", color="#008fd5")
plt.text(x=2.5, y=.25, s="$p = 0.5$", alpha=.75, weight="bold", color="#fc4f30")
plt.text(x=1.5, y=.7, s="$p = 0.9$", alpha=.75, weight="bold", color="#e5ae38")

# TICKS
plt.xticks(range(11))
plt.tick_params(axis = 'both', which = 'major', labelsize = 18)
plt.axhline(y = 0, color = 'black', linewidth = 1.3, alpha = .7)

# TITLE, SUBTITLE & FOOTER
plt.text(x = -1, y = 1.125, s = "Geometric Distribution - $p$",
         fontsize = 26, weight = 'bold', alpha = .75)
plt.text(x = -1, y = 1,
         s = 'Depicted below are three Geometric distributed random variables with varying $p $. As one can\nsee the parameter $p$ flattens the distribution (the larger p the sharper the distribution).',
         fontsize = 19, alpha = .85)
plt.text(x = -1,y = -0.175,
         s = 'Geometric',
         fontsize = 14, color = '#f0f0f0', backgroundcolor = 'grey');
# -

# ***
# ## Implementation in Python
# Multiple Python packages implement the Geometric distribution. One of those is the `stats.geom` module from the `scipy` package. The following methods are only an excerpt. For a full list of features the [official documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.geom.html) should be read.

# ### Random Variates
# In order to generate a random sample from, the function `rvs` should be used.

# +
import numpy as np
from scipy.stats import geom

# draw a single sample
np.random.seed(42)
print(geom.rvs(p=0.3), end="\n\n")

# draw 10 samples
print(geom.rvs(p=0.3, size=10), end="\n\n")
# -

# ### Probability Mass Function
# The probability mass function can be accessed via the `pmf` function (mass instead of density since the Geometric distribution is discrete). Like the `rvs` method, the `pdf` allows for adjusting the $p$ of the random variable:

# +
import numpy as np
from scipy.stats import geom

# additional imports for plotting purpose
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams["figure.figsize"] = (14,7)

# likelihood of x and y
x = 1
y = 7
print("pmf(X=1) = {}\npmf(X=7) = {}".format(geom.pmf(k=x, p=0.3), geom.pmf(k=y, p=0.3)))

# continuous pdf for the plot
x_s = np.arange(11)
y_s = geom.pmf(k=x_s, p=0.3)
plt.scatter(x_s, y_s, s=100);
# -

# ### Cumulative Probability Density Function
# The cumulative probability density function is useful when a probability range has to be calculated. It can be accessed via the `cdf` function:

# +
from scipy.stats import geom

# probability of X less or equal 3
print("P(X <=3) = {}".format(geom.cdf(k=3, p=0.3)))

# probability of X in (2, 8]
print("P(2 < X <= 8) = {}".format(geom.cdf(k=8, p=0.3) - geom.cdf(k=2, p=0.3)))
# -

# ***
# ## Infering $p$
# Given a sample of datapoints it is often required to estimate the "true" parameters of the distribution. In the case of the Geometric distribution this estimation is quite simple. $p$ can be derived by calculating the reciprocal of the sample's mean.

# +
# IMPORTS
from collections import Counter
import numpy as np
from scipy.stats import geom
import matplotlib.pyplot as plt
import matplotlib.style as style
from IPython.core.display import HTML

# PLOTTING CONFIG
# %matplotlib inline
style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (14, 7)
HTML("""
<style>
.output_png {
    display: table-cell;
    text-align: center;
    vertical-align: center;
}
</style>
""")
plt.figure(dpi=100)

##### COMPUTATION #####
# DECLARING THE "TRUE" PARAMETERS UNDERLYING THE SAMPLE
p_real = 0.3

# DRAW A SAMPLE OF N=100
np.random.seed(42)
sample = geom.rvs(p=p_real, size=100)

# ESTIMATE P (method of moments: p = 1 / sample mean)
p_est = 1.0/np.mean(sample)
print("Estimated p: {}".format(p_est))

##### PLOTTING #####
# SAMPLE DISTRIBUTION
cnt = Counter(sample)
cnt[0] = 0  # added to fit pmf
_, values = zip(*sorted(cnt.items()))
plt.bar(range(len(values)), values/np.sum(values), alpha=0.25);

# TRUE CURVE
plt.plot(range(18), geom.pmf(k=range(18), p=p_real), color="#fc4f30")

# ESTIMATED CURVE
plt.plot(range(18), geom.pmf(k=range(18), p=p_est), color="#e5ae38")

# LEGEND
plt.text(x=2, y=.06, s="sample", alpha=.75, weight="bold", color="#008fd5")
plt.text(x=6.5, y=.075, s="true distrubtion", rotation=-15, alpha=.75, weight="bold", color="#fc4f30")
plt.text(x=2, y=.275, s="estimated distribution", rotation=-60, alpha=.75, weight="bold", color="#e5ae38")

# TICKS
plt.xticks(range(17)[::2])
plt.tick_params(axis = 'both', which = 'major', labelsize = 18)
plt.axhline(y = 0.002, color = 'black', linewidth = 1.3, alpha = .7)

# TITLE, SUBTITLE & FOOTER
plt.text(x = -2.5, y = 0.425, s = "Geometric Distribution - Parameter Estimation",
         fontsize = 26, weight = 'bold', alpha = .75)
# NOTE(review): the subtitle formats np.mean(sample) (the sample mean), but the
# estimated p is p_est = 1/mean - confirm which value the text should show.
plt.text(x = -2.5, y = 0.375,
         s = 'Depicted below is the distribution of a sample (blue) drawn from a Geometric distribution with\n$p = 0.3$ (red). Also the estimated distrubution with $p \sim {:.3f}$ is shown (yellow).'.format(np.mean(sample)),
         fontsize = 19, alpha = .85)
plt.text(x = -2.5,y = -0.04,
         s = 'Geometric',
         fontsize = 14, color = '#f0f0f0', backgroundcolor = 'grey');
# -

# ## Infering $p$ - MCMC
# In addition to a direct estimation from the sample $p$ can also be estimated using Markov chain Monte Carlo simulation - implemented in Python's [PyMC3](https://github.com/pymc-devs/pymc3).

# +
# IMPORTS
from collections import Counter
import numpy as np
from scipy.stats import geom
import matplotlib.pyplot as plt
import matplotlib.style as style
from IPython.core.display import HTML
import pymc3 as pm

# PLOTTING CONFIG
# %matplotlib inline
style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (14, 7)
HTML("""
<style>
.output_png {
    display: table-cell;
    text-align: center;
    vertical-align: center;
}
</style>
""")
plt.figure(dpi=100)

##### COMPUTATION #####
# DECLARING THE "TRUE" PARAMETERS UNDERLYING THE SAMPLE
p_real = 0.3

# DRAW A SAMPLE OF N=1000
np.random.seed(42)
sample = geom.rvs(p=p_real, size=100)

##### SIMULATION #####
# MODEL BUILDING: uniform prior on p, geometric likelihood over the sample.
with pm.Model() as model:
    p = pm.Uniform("p")
    geometric = pm.Geometric("geometric", p=p, observed=sample)

# MODEL RUN (first half of the trace discarded as burn-in)
with model:
    step = pm.Metropolis()
    trace = pm.sample(100000, step=step)
    burned_trace = trace[50000:]

# P - 95% CONF INTERVAL
ps = burned_trace["p"]
ps_est_95 = ps.mean() - 2*ps.std(), ps.mean() + 2*ps.std()
print("95% of sampled ps are between {:0.3f} and {:0.3f}".format(*ps_est_95))

##### PLOTTING #####
# SAMPLE DISTRIBUTION
cnt = Counter(sample)
cnt[0] = 0  # added to fit pmf
_, values = zip(*sorted(cnt.items()))
plt.bar(range(len(values)), values/np.sum(values), alpha=0.25);

# TRUE CURVE
plt.plot(range(18), geom.pmf(k=range(18), p=p_real), color="#fc4f30")

# ESTIMATED CURVE
plt.plot(range(18), geom.pmf(k=range(18), p=ps.mean()), color="#e5ae38")

# LEGEND
plt.text(x=2, y=.06, s="sample", alpha=.75, weight="bold", color="#008fd5")
plt.text(x=6.5, y=.075, s="true distrubtion", rotation=-15, alpha=.75, weight="bold", color="#fc4f30")
plt.text(x=2, y=.275, s="estimated distribution", rotation=-60, alpha=.75, weight="bold", color="#e5ae38")

# TICKS
plt.xticks(range(17)[::2])
plt.tick_params(axis = 'both', which = 'major', labelsize = 18)
plt.axhline(y = 0.002, color = 'black', linewidth = 1.3, alpha = .7)

# TITLE, SUBTITLE & FOOTER
plt.text(x = -2.5, y = 0.425, s = "Geometric Distribution - Parameter Estimation (MCMC)",
         fontsize = 26, weight = 'bold', alpha = .75)
plt.text(x = -2.5, y = 0.375,
         s = 'Depicted below is the distribution of a sample (blue) drawn from a Geometric distribution with\n$p = 0.3$ (red). Also the estimated distrubution with $p \sim {:.3f}$ is shown (yellow).'.format(ps.mean()),
         fontsize = 19, alpha = .85)
plt.text(x = -2.5,y = -0.04,
         s = 'Geometric',
         fontsize = 14, color = '#f0f0f0', backgroundcolor = 'grey');
# -

# ***
# [1] - [Wikipedia. Geometric Distribution](https://en.wikipedia.org/wiki/Geometric_distribution)
Mathematics/Statistics/Statistics and Probability Python Notebooks/Important-Statistics-Distributions-py-notebooks/Geometric Distribution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -

# Covariance is a measure of the degree to which two random variables vary
# together relative to their means. It can be computed with the following
# formula:

# ![cov.PNG](attachment:cov.PNG)

# In essence, it tells us how one variable evolves as a function of what the
# other variable does, and it can take the following values:
#
# - Covariance(X,Y)=0: there is no relationship between variables X and Y.
#
# - Covariance(X,Y)<0: when X increases, Y decreases. There is a negative relationship.
#
# - Covariance(X,Y)>0: when X increases, Y increases. There is a positive relationship.

# Covariance has the following properties:
# - Cov(X,b)=0, where b is a constant.
# - Cov(X,X)=Var(X)
# - Cov(X,Y)=Cov(Y,X)
# - Cov(b·X,c·Y)=c·b·Cov(X,Y)
# - Cov(b+X,c+Y)=Cov(X,Y)

# Keep in mind that covariances of different variable pairs are not directly
# comparable: covariance is an absolute quantity whose value depends on the
# measurement units of the variables.

import numpy as np

X = np.array([0, 2, 5, 7, 9])
Y = np.array([2, 4, 6, 8, 12])

X_media = sum(X)/len(X)
X_media

Y_media = sum(Y)/len(Y)
Y_media

# Numerator of the covariance formula: the sum over all pairs of the product
# of each value's deviation from its mean.
# BUG FIX: the first pair previously used `+` instead of `*`
# ((0-X_media)+(2-Y_media)), which silently produced a wrong covariance.
numerador = (0-X_media)*(2-Y_media) + (2-X_media)*(4-Y_media) + (5-X_media)*(6-Y_media) + (7-X_media)*(8-Y_media) + (9-X_media)*(12-Y_media)
numerador

n = len(X)
n

# Biased (population) covariance: divide by n.
cov_xy = numerador / n
cov_xy

# Unbiased (sample) covariance: divide by n - 1.
cov_xy_insesgada = numerador / (n-1)
cov_xy_insesgada

# np.cov returns the covariance matrix (unbiased by default); its off-diagonal
# entries should match cov_xy_insesgada.
np.cov(X, Y, rowvar=True)

# The plots below are purely illustrative; the import is guarded so the script
# also runs in environments without matplotlib installed (e.g. headless CI).
try:
    import matplotlib.pyplot as plt

    plt.plot(X)
    plt.plot(Y)

    plt.plot(X, Y)
except ImportError:
    # matplotlib not available -- skip the illustrative plots
    pass
notebooks/code/7 - Covarianza - Code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis Ready Data Tutorial Part 1: Introduction and Best Practices
#
# Time-series analysis (e.g. change detection and trend detection) is a powerful application of satellite imagery. However, a great deal of processing is required to prepare imagery for analysis. Analysis Ready Data (ARD), preprocessed time-series stacks of overhead imagery, allow for time-series analysis without any additional processing of the imagery. See [Analysis Data Defined](https://medium.com/planet-stories/analysis-ready-data-defined-5694f6f48815) for an excellent introduction and discussion on ARD.
#
# This tutorial shows how [Planet APIs](https://developers.planet.com/docs/apis/) can simplify production of ARD by demonstrating best practices and then by walking through a real world use case. This tutorial is targeted to users who have little to no geospatial knowledge but have experience working with APIs. The goal of this tutorial is to teach the user the how and whys of using the Data and Orders APIs to create and interpret ARD for both use cases. This first part of the tutorial focuses on best practices. The following part will focus on a real-world use case.
#
#
# ## APIs
#
# The two Planet APIs that are used in creation of ARD are the [Data API](https://developers.planet.com/docs/data/) and the [Orders API](https://developers.planet.com/docs/orders/). The Data API is used to search for imagery according to certain criteria based on the use case, and the Orders API is used to process that imagery into ARD that can be fed directly into time-series analysis.
#
# ### Data API
#
# The first step in using the Data API is identifying the search criteria. 
This is specifying answers to the following questions regarding the use case time-series analysis: # * What is the time range? # * What product item type is desired? # * What is the area of interest (geographic region)? # * What percentage of pixels need to be usable? # * etc. # # While time range is likely pretty trivial to determine, product item, area of interest, and usable pixels may take a little bit of work. Let's dive into each further # # #### Product Item Type # # The [product item type](https://developers.planet.com/docs/data/items-assets/) refers to the sensor source (aka satellite type) and basic processing desired. This decision is highly dependent on application, as coverage, revisit rate, spectral bands, and resolution differ between products. A good overview of the products available in the Planet Data API is provided on the [Planet Imagery and Archive](https://www.planet.com/products/planet-imagery/) page (look for the link to product specs for details). For most frequent revisit rate, we will use the PS satellite. Experience has shown that customers most often use the scene (vs the orthotile) product. Therefore, this tutorial will focus on the `PSScene4Band` product. # # #### Area of Interest # # The area of interest is the geographic region for the analysis, given as GeoJSON. If you are familiar with JSON, the format of GeoJSON will likely be easy to grasp. According to <geojson.org>, "GeoJSON is a format for encoding a variety of geographic data structures." The specific geographic data structure we are interested is a `Polygon`. The [GeoJSON wikipedia page](https://en.wikipedia.org/wiki/GeoJSON) gives some great examples of the data structures for the various GeoJSON data structures. # # Some care needs to be given to describing the [position](https://tools.ietf.org/html/rfc7946#section-3.1.1) for each coordinate in a GeoJSON geometry. Each position is specified as `(longitude, latitude)` or `(easting, northing)` (order really matters here!). 
Also, this may be a surprise, but the same point on earth can have different `(longitude, latitude)` values based on the spatial reference system. Basically, a spatial reference system describes where something is in the real world. But there are thousands of different spatial reference systems. These are separated into two categories: geographic and projected. Geographic coordinate systems model the earth as an ellipsoid (this model is called the `datum`) and describe positions on the surface of the earth (coordinates) in terms of the prime meridian and angle of measure. Projected coordinate systems take this a step further by projecting the geographic coordinates (quite three-dimensional) into two dimensions (the `projection`). Different projections preserve different properties, such as area, angles, or direction for north. There is a rich area of discovery, discussion, and even a little [teasing](https://xkcd.com/977/) (thanks xkcd!) in the world of spatial reference systems.
#
# GeoJSON only supports one spatial reference system, [WGS84](https://spatialreference.org/ref/epsg/wgs-84/). This is a geographic coordinate system describing locations in latitude, longitude. However, many web mapping applications use the Web Mercator projected coordinate system to describe locations. Confusingly, Web Mercator also describes locations in latitude, longitude. But a `(longitude, latitude)` GeoJSON position given in Web Mercator will **not** end up where you expect if it is not first projected into WGS84. The easiest way to define an aoi that is described in WGS84 is to draw it in [Planet Explorer](https://www.planet.com/explorer) and click the little button "Download AOI as GeoJSON."
#
# #### Usable Pixels
#
# Taking pictures of the earth from space ain't easy. There are a lot of steps and a lot of things have to go right to get a clear shot. Therefore, not every pixel in every image taken from space is useful for analysis. 
For one, images taken from space are projected into a spatial reference system (discussed briefly above), a process that introduces some NoData (aka outside of the image footprint) pixels into the resulting image. Additionally, clouds cover a great deal of the earth and create cloudy pixels when imaged. While some applications can use cloudy pixels, others cannot. Therefore, the type of pixels that are determined to be 'usable' are often application-specific. To support definition of usable pixels, and filtering based on that definition, Planet provides Usable Data Masks along with Usable Data entries in the imagery metadata. For more details on the Usable Data Mask, check out [Clear for Analysis with Planet’s New Usable Data Masks](https://www.planet.com/pulse/planets-new-usable-data-masks/). You can also find great examples for working with Usable Data Masks in the [UDM2 notebooks](https://github.com/planetlabs/notebooks/tree/master/jupyter-notebooks/udm2). For more information on the Usable Data metadata entries, see [Usable Data in Planet imagery](https://developers.planet.com/planetschool/usable-data-in-planet-imagery/). # # ### Orders API # # The core decision around using the orders api is which [product bundle](https://developers.planet.com/docs/orders/product-bundles-reference/) to use. This is the starting point for all processing and there are a lot of options. Once the product is determined, the processing steps (aka tools and toolchains) are defined. Finally, the logistics of the delivery of the imagery are ironed out. # # #### Product Bundle # # To enable time-series analysis, ARD imagery must be processed so that imagery is consistant across days, months, and possibly years. This means correcting for differences in camera sensitivities, the relative location of the sun, and the atmospheric conditions. 
The Analytic Radiance (`analytic`) product bundle provides imagery corrected for difference in camera sensitivities and location of the sun (as radiance) and can also remove the effect of the sun's spectrum by applying the `reflectanceCoefficient` value given in the imagery metadata. However, the Analytic Surface Reflectance (`analytic_sr`) product bundle removes the effect of atmospheric conditions while also converting to reflectance. Therefore, the Analytic Surface Reflectance is the ideal product for ARD and the one we will use here. # # #### Tools and Toolchains # # The [Tools and Toolchains](https://developers.planet.com/docs/orders/tools-toolchains/) functionality in the Orders API are the key to seamlessly creating ARD. Through the API, one can define the pre-processing steps for the data before it is delivered. Given proper definition of the tools and toolchains, data that is delivered is analysis-ready. # # #### Delivery # # There are a few options for delivery that cater to different use cases. Imagery can be downloaded directly or delivered to [cloud storage](https://developers.planet.com/docs/orders/ordering-delivery/#delivery-to-cloud-storage). When imagery is downloaded, the user can poll for when the order is ready or notifications ([e-mail](https://developers.planet.com/docs/orders/ordering-delivery/#email-notification) or [webhooks](https://developers.planet.com/docs/orders/ordering-delivery/#using-webhooks)) can be used. Additionally, the imagery can be delivered as a [zip archive](https://developers.planet.com/docs/orders/ordering-delivery/#zipping-results). # # ## Best Practices # # Now that we have a basic understanding of the Data and Orders APIs, let's put them to use creating ARD. This will be a demonstration of best practices. # # For this tutorial we will use the [planet python client](https://github.com/planetlabs/planet-client-python) ([documentation](https://planetlabs.github.io/planet-client-python/index.html)). 
This client simplifies interactions with the various Planet APIs and also includes a command-line interface.
#
# The first step in using the planet python client is initializing the client with the user API key. Each user (or organization) has their own unique API key. Information on finding the API key and running Docker so that the API key is available in the notebooks is given in this repository's [README](https://github.com/planetlabs/notebooks#install-and-use-these-notebooks).
#
# The next step is building functionality for searching the Data API with the client. This consists of building the search query and then running the search. In building this functionality, we will use test information for data ranges and AOIs to test the functionality, but we will build the functions so those pieces can be changed by the end user. For this step we will demonstrate use of both the python api and the cli.
#
# The third step is building functionality for processing and delivery with the Orders API. At the time of the creation of this tutorial (June 2019), the Orders API functionality was best supported (e.g. [documented](https://planetlabs.github.io/planet-client-python/cli/examples.html#orders-examples)) in the command-line interface (CLI). Therefore, we will use the client CLI for this portion of the tutorial.
#
# Finally, once we have downloaded the order, we will unzip it and visualize the resulting imagery.
#
#
# To summarize, these are the steps:
# 1. [Initialize API client](#Step-1:-Initialize-API-client)
# 1. [Search Data API](#Step-2:-Search-Data-API)
# 1. [Submit Order](#Step-3:-Submit-Order)
# 1. [Download Order](#Step-4:-Download-Order)
# 1. 
[Unzip and View Order](#Step-5:-Unzip-and-View-Order) # # #### Import Dependencies # + import datetime import json import os from pathlib import Path from pprint import pprint import time from zipfile import ZipFile import numpy as np from planet import api from planet.api import filters import rasterio from rasterio import plot from shapely.geometry import MultiPolygon, shape # - # #### Step 1: Initialize API client # + # if your Planet API Key is not set as an environment variable, you can paste it below API_KEY = os.environ.get('PL_API_KEY', 'PASTE_YOUR_KEY_HERE') client = api.ClientV1(api_key=API_KEY) # - # #### Step 2: Search Data API # # The goal of this step is to get the scene ids that meet the search criteria for this use case. # + # define test data for the filter test_start_date = datetime.datetime(year=2019,month=4,day=1) test_stop_date = datetime.datetime(year=2019,month=5,day=1) # iowa crops aoi test_aoi_geom = { "type": "Polygon", "coordinates": [ [ [-93.299129, 42.699599], [-93.299674, 42.812757], [-93.288436, 42.861921], [-93.265332, 42.924817], [-92.993873, 42.925124], [-92.993888, 42.773637], [-92.998396, 42.754529], [-93.019154, 42.699988], [-93.299129, 42.699599] ] ] } # + # create an api request from the search specifications def build_request(aoi_geom, start_date, stop_date): '''build a data api search request for clear PSScene4Band imagery''' item_type = 'PSScene4Band' query = filters.and_filter( filters.geom_filter(aoi_geom), filters.range_filter('clear_percent', gte=90), filters.date_range('acquired', gt=start_date), filters.date_range('acquired', lt=stop_date) ) return filters.build_search_request(query, ['PSScene4Band']) request = build_request(test_aoi_geom, test_start_date, test_stop_date) print(request) # + # search the data api def search_data_api(request, client, limit=500): result = client.quick_search(request) # this returns a generator return result.items_iter(limit=limit) items = list(search_data_api(request, client)) 
print(len(items)) # + # check out an item just for fun # pprint(items[0]) # - # visualize a scene footprint footprints = [shape(i['geometry']) for i in items] footprints[0] # visualize subset of footprints and aoi MultiPolygon([shape(test_aoi_geom), *footprints[:5]]) # As we can see, the footprints (rectangles) do not exactly match the AOI. Indeed, none of them cover the AOI. We don't care about pixels outside of the AOI, so we are going to want to clip the imagery to the AOI (to remove pixels outside the AOI). # #### Step 3: Submit Order # # Now that we have the scene ids, we can create the order. The output of this step is a single zip file that contains all of the scenes that meet our criteria. # # Because this is a demo, we don't download all of the scene ids. # work with just a subset of the items in the interest of bandwidth test_items = items[:2] # filter to item ids ids = [i['id'] for i in test_items] ids # specify the psscene4band surface reflectance product # make sure to get the *_udm2 bundle so you get the udm2 product # note: capitalization really matters in item_type when using planet client orders api item_type = 'PSScene4Band' bundle = 'analytic_sr_udm2' # ##### Step 3.1: Build Orders Toolchain # + # specify tools # clip to AOI clip_tool = {'clip': {'aoi': test_aoi_geom}} # convert to NDVI bandmath_tool = {'bandmath': { "pixel_type": "32R", "b1": "(b4 - b3) / (b4+b3)" }} tools = [clip_tool, bandmath_tool] pprint(tools) # - # specify a name name = 'tutorial_order' # ##### Step 3.2: Submit Order # # ###### Option 1: Client Python API # # The first option for submitting an order is using the planet client python api. 
# + orders_request = { 'name': name, 'products': [{ 'item_ids': ids, 'item_type': item_type, 'product_bundle': bundle }], 'tools': tools, 'delivery': { 'single_archive': True, 'archive_filename':'{{name}}_{{order_id}}.zip', 'archive_type':'zip' }, 'notifications': { 'email': False }, } # pprint(orders_request, indent=4) # + order_info = client.create_order(orders_request).get() order_id = order_info['id'] order_id # - # ###### Option 2: Client CLI # # In some instances, using the CLI to submit orders may be desired. # + # zip up entire order into one file ziptype = 'order' # format the ids for use with the CLI cli_ids = ','.join([i['id'] for i in test_items]) # save tools definition to file tools_file = 'tools.json' with open(tools_file, 'w') as dst: dst.write(json.dumps(tools)) order_info_file = 'order.json' # - # submit the order and save the response to a file so we can get the order id # !set -x;planet orders create \ # --id $cli_ids \ # --item-type $item_type \ # --bundle $bundle \ # --zip $ziptype \ # --tools $tools_file \ # --name $name | tee $order_info_file # + # read the order id with open(order_info_file, 'r') as src: order_info = json.load(src) order_id = order_info['id'] order_id # - # #### Step 4: Download Order # # To download the order from the orders api, we will use the planet python client CLI. It would be nice to use the python client python api for this step but, as of the writing of this tutorial, support for the orders api in the planet client python api has been [confusing](https://github.com/planetlabs/planet-client-python/issues/217). The CLI download status output is also [confusing and possibly wrong](https://github.com/planetlabs/planet-client-python/issues/218) but the CLI does easily and successfully download the order. # # When we download an order, we always get a `manifest.json` file. Therefore, while we only ordered one file (an order zip), we will download two files. 
The manifest is [very useful](https://developers.planet.com/docs/orders/ordering-delivery/#why-you-should-depend-on-the-manifest-file) and we will use it to locate the zip file we ordered and downloaded. # # ##### Step 4.1: Wait Until Order is Successful # # Before we can download the order, the order has to be prepared on the server. Though it would be ideal if the CLI download functionality took care of this, right now it will [error out](https://github.com/planetlabs/planet-client-python/issues/219) if the order hasn't already been run successfully. So, for now, we will do our own polling and waiting until the order is successful. # + def poll_for_success(order_id, client, num_loops=50): count = 0 while(count < num_loops): count += 1 order_info = client.get_individual_order(order_id).get() state = order_info['state'] print(state) success_states = ['success', 'partial'] if state == 'failed': raise Exception(response) elif state in success_states: break time.sleep(10) poll_for_success(order_id, client) # - # ##### Step 4.2: Run Download # # For this step we will use the orders CLI because it is the easiest and best supported way to download via the planet client for now. # # One thing to watch out for: if you have already downloaded an order, it won't be available for re-download. You will have to resubmit the order and get a new order id. What is tricky is that the manifest is still downloaded, but the other files are not. # + demo_data_dir = os.path.join('data', 'demo') # make the download directory if it doesn't exist Path(demo_data_dir).mkdir(parents=True, exist_ok=True) # - # !set -x;planet orders download --dest $demo_data_dir $order_id # ##### Step 4.3: Get Downloaded File Location(s) # # We use the downloaded order manifest to find the downloaded file locations. The manifest is saved in the download directory. 
# !ls data/demo # + def get_download_locations(download_dir): manifest_file = os.path.join(download_dir, 'manifest.json') with open(manifest_file, 'r') as src: manifest = json.load(src) # uncomment to see the manifest # pprint(manifest) locations = [os.path.join(download_dir, f['path']) for f in manifest['files']] return locations locations = get_download_locations(demo_data_dir) pprint(locations) # - # lets just double check to see if the zip file got downloaded # !ls data/demo # #### Step 5: Unzip and View Order # # In this step we will simply unzip the order and view the downloaded images and their usable data masks. # # ##### 5.1: Unzip Order # # We will unzip the order into a directory named after the file, then we will find the downloaded files (they are in a `files` subdirectory) # + def unzip(filename): location = Path(filename) zipdir = location.parent / location.stem with ZipFile(location) as myzip: myzip.extractall(zipdir) return zipdir zipdir = unzip(locations[0]) zipdir # + def get_unzipped_files(zipdir): filedir = zipdir / 'files' filenames = os.listdir(filedir) return [filedir / f for f in filenames] file_paths = get_unzipped_files(zipdir) pprint(file_paths) # - # ##### 5.2: Visualize Images # # In this section we will find the image files and their associated UDMs and we will visualize them. # # The first band of the UDM2 file is the clear/not-clear band. 0: not-clear, 1: clear. 
# +
def get_image_and_udm_files(file_paths):
    '''Pair the downloaded image files with their associated UDM2 mask files.

    file_paths -- iterable of Paths to the extracted order files
    Returns (imgfiles, udmfiles), parallel lists so imgfiles[i] corresponds
    to udmfiles[i].
    '''
    files = [str(p) for p in file_paths]

    # the image files are tiffs and are identified with '_SR_' in the name
    img_id = '_AnalyticMS_SR_'
    imgfiles = [f for f in files if f.endswith('.tif') and img_id in f]

    # get associated udm files for image files
    # each image has a unique id at the beginning of the name
    imgroots = [str(f).split(img_id)[0] for f in imgfiles]

    # the udm files are identified with '_udm2' in the name
    udmfiles = [next(f for f in files if f.startswith(r + '_udm2')) for r in imgroots]
    return imgfiles, udmfiles

imgfiles, udmfiles = get_image_and_udm_files(file_paths)
pprint(imgfiles)
pprint(udmfiles)

# +
# read UDM2 file
def read_notclear(udm2_filename):
    '''Return a boolean array that is True where pixels are NOT clear.'''
    with rasterio.open(udm2_filename) as img:
        # the first band is the clear/not clear band: 0 = not clear, 1 = clear
        mask = img.read(1)
    not_clear = mask == 0
    return not_clear

udmfile = udmfiles[0]
not_clear = read_notclear(udmfile)

# +
# there is an issue where some udms aren't the same size as the images
# to deal with this just cut off any trailing rows/columns
# this isn't ideal as it can result in up to one pixel shift in x or y direction
def crop(img, shape):
    '''Crop a 2D array to the given (rows, cols) shape.'''
    return img[:shape[0], :shape[1]]

def read_ndvi(img_filename, not_clear):
    '''Read a single-band NDVI image and mask out not-clear pixels.

    Returns a numpy masked array where masked entries are not-clear pixels.
    '''
    # BUG FIX: this previously opened the module-level `imgfile` instead of
    # the `img_filename` argument, so every call read the same (first) image
    # regardless of the file passed in.
    with rasterio.open(img_filename) as img:
        # ndvi is a single-band image
        band = img.read(1)

    # crop image and mask to same size
    img_shape = min(band.shape, not_clear.shape)
    ndvi = np.ma.array(crop(band, img_shape), mask=crop(not_clear, img_shape))
    return ndvi

imgfile = imgfiles[0]
ndvi = read_ndvi(imgfile, not_clear)

# +
# set up NDVI visualization
# copied from: https://stackoverflow.com/a/48598564
import matplotlib.pyplot as plt
import matplotlib.colors as colors

# The NDVI values will range from -1 to 1. You want to use a diverging color
# scheme to visualize the data, and you want to center the colorbar at a
# defined midpoint. The class below allows you to normalize the colorbar.

class MidpointNormalize(colors.Normalize):
    """
    Normalise the colorbar so that diverging bars work there way either side
    from a prescribed midpoint value)

    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))

    Credit: <NAME>, http://chris35wills.github.io/matplotlib_diverging_colorbar/
    Credit: https://stackoverflow.com/a/48598564
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Note that I'm ignoring clipping and other edge cases here.
        result, is_scalar = self.process_value(value)
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)

# +
def show_ndvi(ndvi):
    '''Display an NDVI masked array with a diverging colormap centered at 0.'''
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)

    # diverging color scheme chosen from https://matplotlib.org/users/colormaps.html
    cmap = plt.cm.RdYlGn

    mmin = np.nanmin(ndvi)
    mmax = np.nanmax(ndvi)
    # print((mmin, mmax))
    mid = 0
    cax = ax.imshow(ndvi, cmap=cmap, clim=(mmin, mmax),
                    norm=MidpointNormalize(midpoint=mid, vmin=mmin, vmax=mmax))
    ax.axis('off')
    ax.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')
    cbar = fig.colorbar(cax, orientation='horizontal', shrink=0.65)
    plt.show()

# show_ndvi(ndvi)
# -

for imgfile, udmfile in zip(imgfiles, udmfiles):
    show_ndvi(read_ndvi(imgfile, read_notclear(udmfile)))

# Okay, we got some beautiful NDVI images down! Notice on the second image that the clouds are masked out thanks to the UDM2 band.
jupyter-notebooks/analysis-ready-data/ard_1_intro_and_best_practices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # General concepts of working with Orbit, Ephem, and Phys objects # =============================================================== # # The classes Orbit, Ephem, and Phys are all derived from the same base class ([DataClass](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass)), meaning that they can all be used in the same way. # # The core of [DataClass](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass) is an astropy [QTable](http://docs.astropy.org/en/stable/api/astropy.table.QTable.html#astropy.table.QTable) object, which is an astropy [Table](http://docs.astropy.org/en/stable/table/) that is aware of [units](http://docs.astropy.org/en/stable/units/). [DataClass](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass) provides a number of convenience functions that make it easy to create and modify the underlying [QTable](http://docs.astropy.org/en/stable/api/astropy.table.QTable.html#astropy.table.QTable) object. If these convenience functions do not cover what you are trying to do, you can directly address and modify the [QTable](http://docs.astropy.org/en/stable/api/astropy.table.QTable.html#astropy.table.QTable) object. # # For some introductional examples on how to create, access, and modify [DataClass](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass) objects, please have a look at the [documentation](https://sbpy.readthedocs.io/en/latest/sbpy/data.html). # # In the following sections, we provide some examples that are close to real use cases. # Adding a column and converting units # --------------------------- # # We obtain ephemerides for our target asteroid from JPL Horizons. 
# + from astropy.time import Time from sbpy.data import Ephem epoch1 = Time('2018-09-01 12:00', scale='utc') epoch2 = Time('2018-09-30 12:00', scale='utc') eph = Ephem.from_horizons('2018 RE3', location='568', epochs={'start': epoch1, 'stop': epoch2, 'step': '5d'}, skip_daylight=True) eph.table # - # Sky motion rates are provided for both RA and Dec in units of arcsec per hour: print(eph['RA_rate', 'DEC_rate']) # Let's assume that we are interested in the absolute sky motion in units of arcsec per second. It would be handy to add a column to `eph` that contains this quantity. This can be easily done using the [add_column](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass.add_column) function. Finally, we want to print a table containing epoch, the distance to the observer, and the corresponding absolute sky motion: import numpy as np eph.add_column(np.sqrt(eph['RA_rate']**2 + eph['DEC_rate']**2).to('arcsec/second'), name='abs_rate') eph['datetime_str', 'delta', 'abs_rate'] # The function [add_column](https://sbpy.readthedocs.io/en/latest/api/sbpy.data.DataClass.html#sbpy.data.DataClass.add_column) takes two parameters: the data in form of a list or an array, and the name of the new column. The data that we provide here consists of two parts. We derive the absolute sky motion as the geometric sum of the RA and Dec rates (`np.sqrt(eph['RA_rate']**2 + eph['DEC_rate']**2)`). Remember that each of these rates is in units of arcsec per hour. In order to convert the rates to arcsec per sec, we have to apply `.to('arcsec/second')` to this array. Finally, we assign a name to this new column. # Filtering table content # ----------------------- # # We have a list of 5 asteroids that we would like to observe on the night of 2018-09-12 at the Discovery Channel telescope. However, we don't want to observe these asteroids when they have an absolute sky motion rate faster than 0.1 arcsec/s and when the Moon is up. 
# + import numpy as np import astropy.units as u from astropy.time import Time from sbpy.data import Ephem # target list targets = ['2018 RR4', '2018 RE3', '2018 RC4', '2018 RQ2', '2018 RC1'] epoch1 = Time('2018-09-13 00:00', scale='utc') epoch2 = Time('2018-09-14 00:00', scale='utc') eph = Ephem.from_horizons(targets, location='G37', epochs={'start': epoch1, 'stop': epoch2, 'step': '10m'}, skip_daylight=True) print(len(eph.table)) # - # A total of 345 ephemerides have been queried for the 5 asteroids. Now we apply the absolute sky motion rate (see example above) and moon filters; if the moon is up, `eph['flags']` will be set to `'m'` (see https://ssd.jpl.nasa.gov/?horizons_doc&table_quantities#table_quantities): # + eph = eph[np.sqrt(eph['RA_rate']**2 + eph['DEC_rate']**2).to('arcsec/second') < 0.1*u.arcsec/u.second] eph = eph[eph['flags'] != 'm'] eph # - # As it turns out, only asteroid 2018 RC1 is observable under these conditions. # Field Name Translations and Conversions # ======================================= # # Parameter names are usually not unique. For instance, `diameter` may also be referred to as `d` or `diam`, etc. sbpy acknowledges this ambiuity and provides internal translation tables for parameter names. Consider the following example: from sbpy.data import Phys import astropy.units as u data = Phys.from_dict({'d': 10*u.km}) print(data.table) # In this `Phys` object, the diameter is defined as `d` and can be obtained as such. The internal translations also allow for queries with similar parameter labels: data['d'] data['diameter'] data['D'] # The permitted labels are defined in the list `sbpy.data.core.fieldnames`. A human readable version of this list is available in [the alternative field name document](https://sbpy.readthedocs.io/en/latest/fieldnames.html). # # In addition to translations, sbpy is also using able to handle obvious property conversions, e.g., between and radius. 
Consider the following example:

# querying 'radius' derives it from the stored diameter
# (presumably radius = d / 2 -- see the sbpy field name documentation)
data['radius']

# Note that the object `data` does not contain a column `radius` before this query. However, the column is added as a result of the query:

data.table
notebooks/data/General_concepts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cs229 # language: python # name: cs229 # --- # + [markdown] pycharm={"name": "#%% md\n"} # ## PS2-2 Model Calibration # + [markdown] pycharm={"name": "#%% md\n"} # #### (a) # + [markdown] pycharm={"name": "#%% md\n"} # To derive the maximum likelihood parameters $\theta \in \mathbb{R}^{n + 1}$, $\forall j \in \{ 0, \dots , n \}$, # # \begin{align*} # \frac{\partial}{\partial \theta_j} \ell (\theta) & = \sum_{i = 1}^{m} (y^{(i)} - h_\theta (x^{(i)})) x_j^{(i)} \\ # & = 0 # \end{align*} # # Let $j = 0$. Since $x_0^{(i)} = 1$, the above equation becomes # # $$\sum_{i = 1}^{m} (y^{(i)} - h_\theta (x^{(i)})) = 0$$ # # In other words, # # $$\sum_{i = 1}^{m} h_\theta (x^{(i)}) = \sum_{i = 1}^{m} y^{(i)}$$ # # Because $(a, b) = (0, 1)$ and $y^{(i)} \in \{ 0, 1 \}$, # # \begin{align*} # \frac{\sum_{i \in I_{a, b}} P(y^{(i)} = 1 \ \vert \ x^{(i)}; \ \theta)}{\vert \{ i \in I_{a, b} \} \vert} & = \frac{1}{m} \sum_{i \in I_{a, b}} h_\theta (x^{(i)}) \\ # & = \frac{1}{m} \sum_{i \in I_{a, b}} \mathbb{I} \{ y^{(i)} = 1 \} \\ # & = \frac{\sum_{i \in I_{a, b}} \mathbb{I} \{ y^{(i)} = 1 \}}{\vert \{ i \in I_{a, b} \} \vert} # \end{align*} # + [markdown] pycharm={"name": "#%% md\n"} # #### (b) # + [markdown] pycharm={"name": "#%% md\n"} # No. A perfectly calibrated binary classification model does not necessarily imply that the model achieves perfect accuracy. # # Suppose our dataset is $\{ (x^{(i)}, y^{(i)} = 1) \}_{i = 1}^{m}$. If the model is perfectly calibrated, for $(a, b) = (\frac{1}{2}, 1)$, we have: # # $$\frac{\sum_{i \in I_{a, b}} \mathbb{I} \{ y^{(i)} = 1 \}}{\vert \{ i \in I_{a, b} \} \vert} = \frac{\sum_{i \in I_{a, b}} P(y^{(i)} = 1 \ \vert \ x^{(i)}; \ \theta)}{\vert \{ i \in I_{a, b} \} \vert} < 1$$ # # which indicates that it does not have perfect accuracy. 
# # Also, a model having perfect accuracy does not mean it is perfectly calibrated. Because # # $$\frac{\sum_{i \in I_{a, b}} \mathbb{I} \{ y^{(i)} = 1 \}}{\vert \{ i \in I_{a, b} \} \vert} = 1 > \frac{\sum_{i \in I_{a, b}} P(y^{(i)} = 1 \ \vert \ x^{(i)}; \ \theta)}{\vert \{ i \in I_{a, b} \} \vert}$$ # + [markdown] pycharm={"name": "#%% md\n"} # #### (c) # + [markdown] pycharm={"name": "#%% md\n"} # Apply L2 regularization to the cost function, # # \begin{align*} # \frac{\partial}{\partial \theta_j} J(\theta) & = - \sum_{i = 1}^{m} (y^{(i)} - h_\theta (x^{(i)})) x_j^{(i)} + \lambda \theta_j \\ # & = 0 # \end{align*} # # Obviously, if $\theta_0 = 0$, the property still holds. Otherwise, no.
problem_set_2/PS2-2 Model Calibration.ipynb
"""Search Datamart for datasets that could augment a local taxi dataset."""
from datamart import search, augment
from datamart.utilities.utils import Utils, SEARCH_URL
import pandas as pd
import json

# Load the supply (local) dataset and preview its first ten rows.
old_df = pd.read_csv("./example/taxi_example/taxi.csv")
print("- READ THE SUPPLY DATASET -\n")
print(old_df.iloc[:10, :])

# The search query is described by a JSON document stored next to the data.
with open("./example/taxi_example/taxi_query.json") as f_json:
    query_json = json.load(f_json)

# Query Datamart with both the JSON description and the supply dataframe.
results = search(SEARCH_URL, query_json, old_df)
print("- SEARCH DATAMART BY A DESCRIPTION JSON OBJECT -\n")
print("Returned %d Datasets" % len(results))

for res in results:
    print(res.id)
    # Show at most the first 50 lines of the pretty-printed metadata,
    # indented with a tab so it reads as a sub-section of the id line.
    metadata_lines = json.dumps(res.metadata, indent=2).split('\n', 50)[:50]
    print('\t' + '\n\t'.join(metadata_lines))
    print('\t... ...')
example/taxi_example/taxi_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # QUESTION 1

# Count how many times a user-supplied number occurs in the list.
list1 = [1,3,5,7,3,8,3,4,6,3,2,4,7,9,4,3,2,2]
num1 = int(input("Enter a number to find how many times it occurs - "))
# print() makes the count visible when run as a plain script, not only
# as the displayed value of the last expression in a notebook cell.
print(list1.count(num1))

# +
#OR
# Report the frequency of every element.  Each value is reported once per
# occurrence, matching the original output.  (A bare `list1.count(x)`
# statement whose result was discarded has been removed.)
list1 = [1,3,5,7,3,8,3,4,6,3,2,4,7,9,4,3,2,2]
for x in list1:
    print(x, "repeats" ,list1.count(x), "times")
# -

# # I

# Plain sort() on IP strings is lexicographic, so "192.168.10.11" sorts
# before "192.168.10.4"; shown first for contrast.
list3 = ["192.168.10.9","192.168.10.4","192.168.10.11","192.168.10.35",]
list3.sort()
print(list3)

# +
def fun(x):
    """Return the last octet of the dotted-quad IP string *x* as an int."""
    b = int(x.split(".")[3])
    return b
# -

# Sorting with the numeric last octet as key gives the intended order.
list3.sort(key = fun)
print(list3)

# # II

# +
# Move all zeros to the end of the list, keeping the rest sorted.
list1 = [1,2,3,0,2,3,0,1,24,0,1,3,1,0,1,21,0,1,2,0,45]
print(list1)
# -

list1.sort()           # after sorting, all zeros sit at the front...
b = list1.count(0)
print(list1[b:] + list1[:b])   # ...so rotate them to the back

# # III

# Sort a list of tuples by their first element.
list1 =[(10,4),(90,3),(9,1),(10,4),(9,5)]
print(list1)

# +
def fun(x):
    """Key function: sort tuples by their first element.

    The original version contained unreachable statements after the first
    ``return`` (they computed ``x[0] + x[1]``); they have been removed.
    To sort by the tuple sum instead, return ``x[0] + x[1]``.
    """
    return x[0]

list1.sort(key = fun)
print(list1)
_Advanced Python - Day 3 Assignment .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Activity 4.05: Linear Regression for Animal Attribute Relations

# You have a dataset about various animals including their body mass and maximum longevity. To discover if there is any linear relationship between these two variables a regression plot shall be used.

# %matplotlib inline

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

mydata = pd.read_csv("../../Datasets/anage_data.csv")
mydata.head()

# Filter the data so you end up with samples containing a body mass and a maximum longevity. Only consider samples for the class Mammalia and a body mass below 200,000.

longevity = 'Maximum longevity (yrs)'
mass = 'Body mass (g)'
data = mydata[mydata['Class'] == 'Mammalia']
# Keep only rows where both variables are finite (drops NaN) and the
# body mass is below the 200,000 g cut-off from the activity statement.
data = data[np.isfinite(data[longevity]) & np.isfinite(data[mass]) & (data[mass] < 200000)]

# Create a regression plot to visualize the linear relationship of the variables.

# Create figure
sns.set()
plt.figure(figsize=(10, 6), dpi=300)
# Create scatter plot with a fitted regression line.
# x/y must be passed as keyword arguments: seaborn 0.12 removed support
# for positional x/y in regplot, so the original
# `sns.regplot(mass, longevity, data=data)` raises a TypeError there.
sns.regplot(x=mass, y=longevity, data=data)
# Show plot
plt.show()
Chapter04/Activity4.05/Activity4.05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Arpeggio
#
# This song is built around an arpeggiated synthesizer (`wubwub.Arpeggiator`).  Named chords are used to determine the notes played by said arpeggiator.

# +
from pysndfx import AudioEffectsChain
import wubwub as wb
import wubwub.sounds as snd

# load sounds
SYNTH = snd.load('synth.elka')
BASS = snd.load('bass.synth')
DRUMS = snd.load('drums.house')

# init the sequencer: 110 BPM, 32 beats long
seq = wb.Sequencer(bpm=110, beats=32)

# create an arpeggiator, fill it with named chords
# freq=1/6 is the arpeggiation rate; 'updown' cycles the chord up then down
arp = seq.add_arpeggiator(sample=SYNTH['C3'], name='arp', basepitch='C3',
                          freq=1/6, method='updown')
# Each chord stacks a low voicing (lengths=8) with the same chord an
# octave up (add=12 semitones).
C = wb.chord_from_name(root='C2', lengths=8) + wb.chord_from_name(root='C3', add=12)
E = wb.chord_from_name(root='E2', lengths=8) + wb.chord_from_name(root='E3', add=12)
F = wb.chord_from_name(root='F2', lengths=8) + wb.chord_from_name(root='F3', add=12)
Fm = wb.chord_from_name(root='F2', kind='m', lengths=8) + wb.chord_from_name(root='F3', kind='m', add=12)
# One chord per 8-beat measure: C | E | F | Fm
arp[1] = C
arp[9] = E
arp[17] = F
arp[25] = Fm
arp.effects = AudioEffectsChain().reverb(reverberance=10, wet_gain=1).lowpass(5000)

# create a lead synth line
# use a pattern to set the rhythm
# add lots of effects
lead1 = seq.add_sampler(sample=SYNTH['C3'], name='lead1', basepitch='C3')
# rhythmic pattern repeated every 16 beats; pitches are semitone offsets
melodypat = wb.Pattern([1,3,4,5,7,9], length=16)
lead1.make_notes(beats=melodypat, pitches=[4, 2, 0, 7, 4, 8], lengths=[2,2,2,2,2,8])
lead1.make_notes(beats=melodypat.onmeasure(2), pitches=[9, 7, 5, 4, 0, 2], lengths=[2,2,2,2,2,8])
lead1.effects = (AudioEffectsChain()
                 .reverb(room_scale=100, wet_gain=4, reverberance=40)
                 .delay(delays=[500,1000,1500,2000], decays=[.7])
                 .lowpass(6000))
lead1.volume += 5

# create a slightly detuned second synthesizer to create a chorus effect
# (every note copied 0.4 semitones flat)
lead2 = seq.duplicate_track('lead1', newname='lead2')
lead2notes = {b:n.alter(pitch=n.pitch-.4) for b, n in lead1.notedict.items()}
lead2.add_fromdict(lead2notes)

# add a bass note following the chord roots (C, E, F)
bass = seq.add_sampler(sample=BASS['fat8tone'], name='bass', basepitch='C3')
bass[[1,5]] = wb.Note(pitch='C3', length=4)
bass[[9,13]] = wb.Note(pitch='E3', length=4)
bass[[17, 21, 25, 29]] = wb.Note(pitch='F3', length=4)
bass.volume -= 12
bass.effects = AudioEffectsChain().overdrive(colour=50).reverb().lowpass(1000)

# add a lonely kick drum: on every 4th beat plus a hit half a beat early
kick = seq.add_sampler(sample=DRUMS['kick7'], name='kick')
kick.make_notes_every(4)
kick.make_notes_every(4, offset=-.5)
kick.clean()
kick.effects = AudioEffectsChain().reverb().lowpass(600)
kick.volume -= 4

# build the output (overhang lets the tail of effects ring out 4 beats)
seq.build(overhang=4)
examples/arpeggio.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Working with Tuples (immutable)

# !python --version

# + tags=[]
# Tuples use parentheses as their syntax
tupla1 = ()
# -

tupla1

tupla1 = ('Ferrari','Onça',17.900,500000)

tupla1

# FIX: the original read `tupla = (10)` and claimed Python 3 creates a
# tuple without the comma.  That is wrong: (10) is just the int 10 —
# a one-element tuple REQUIRES the trailing comma.
tupla = (10,)

# ### Printing tuple elements by position

tupla1

tupla1[0], tupla1[2], tupla1[3]

# ### Tuples are immutable!! (important)

# Item assignment raises TypeError; it is caught here so the script can
# demonstrate the error and still run to completion.
try:
    tupla1[0] = 'Porshe'
except TypeError as err:
    print("TypeError:", err)

# To "insert" an element, convert the tuple to a list and back again.

list_tupla = list(tupla1)
list_tupla

list_tupla[0] = "Porshe"
list_tupla

tupla1 = tuple(list_tupla)
tupla1

# ### Creating nested tuples

tupla_aninhada = (1,2,3, (23,43,78))

tupla_aninhada[3]

# Accessing a value inside the nested tuple
tupla_aninhada[3][1]

# # Basic Operations

tupla1 = ('Ferrari','Onça',17.900,500000)
tupla1

# Repetition creates a new tuple
tupla2 = tupla1 * 3
print(tupla2)

# ### Concatenating tuples

tupla_concatenada = (1, 2, 3) + (4, 5, 6)
tupla_concatenada

tupla1

# Membership test
'Onça' in tupla1

# # Slicing

# From the element at position 2 onwards.
tupla1[2:]

# From position 1 up to (but not including) position 3.
tupla1[1:3]

# # Available Methods

tupla1

# Counts the occurrences of an element (0 when absent)
tupla1.count('elefante')

# Returns the position of an element (17.900 == 17.9)
tupla1.index(17.9)

# Tuple length
len(tupla1)

# Largest value in the tuple
tupla2 = (2,3,4,128_787,90.0)
max(tupla2)

# Smallest value in the tuple
min(tupla2)

# Creating a list
lista = [1,3,4,3,89.9,765,'<NAME>','DataScience']

# Converting a list into a tuple
tupla3 = tuple(lista)

type(tupla3)

tupla3
Modulo004_Dominando_Python/Dominando o Python - Aulas 08 e 09.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

# !pip install srcomapi

import srcomapi, srcomapi.datatypes as dt
api = srcomapi.SpeedrunCom()
api.debug = 1

import statistics
import matplotlib
import matplotlib.pyplot as plt

def getRunStats(game, category):
    """Placeholder: gather run statistics for *game* / *category*.

    The original definition had no body at all, which is a SyntaxError
    and made the whole module unloadable; ``pass`` keeps it importable
    until the statistics logic is written.
    """
    # TODO: implement using the srcomapi run data and the statistics /
    # matplotlib imports above.
    pass

# +
search = input('Enter game name: ')
result = api.search(srcomapi.datatypes.Game, {"name": search})
# enumerate() replaces `result.index(game) + 1`: it avoids an O(n) list
# search on every iteration and stays correct if the same game appears
# more than once in the results.
for idx, game in enumerate(result, start=1):
    print(idx, game)
# -
content/lessons/-FinalProject/FinalProjectAttempt1.ipynb
# --- # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # jupyter: # jupytext: # formats: ipynb,md:myst,py # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # # Autodidax: JAX core from scratch # # Ever want to learn how JAX works, but the implementation seemed impenetrable? # Well, you're in luck! By reading this tutorial, you'll learn every big idea in # JAX's core system. You'll even get clued into our weird jargon! # # **This is a work-in-progress draft.** There are some important ingredients # missing, still to come in parts 5 and 6 (and more?). There are also some # simplifications here that we haven't yet applied to the main system, but we # will. # ## Part 1: Transformations as interpreters: standard evaluation, `jvp`, and `vmap` # # We want to transform functions that look like this: # # ```python # def f(x): # y = sin(x) * 2. # z = - y + x # return z # ``` # # Think of functions like `sin` and the arithmetic operations underlying the # infix operators (`mul`, `add`, and `neg`) as primitive operations, meaning # atomic units of processing rather than compositions. # # "Transform" means "interpret differently." 
# Instead of standard interpretation
# where we apply primitive operations to numerical inputs to produce numerical
# outputs, we want to override primitive application and let different values
# flow through our program. For example, we might want to replace the
# application of every primitive with an application of [its JVP
# rule](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html),
# and let primal-tangent pairs flow through our program. Moreover, we want to be
# able to compose multiple transformations, leading to stacks of interpreters.

# ### JAX core machinery
#
# We can implement stacks of interpreters and even have them all discharge on
# the fly as we execute the Python function to be transformed. To start, let's
# define these primitives so that we can intercept their application:

# +
from typing import NamedTuple

class Primitive(NamedTuple):
  name: str

add_p = Primitive('add')
mul_p = Primitive('mul')
neg_p = Primitive("neg")
sin_p = Primitive("sin")
cos_p = Primitive("cos")
reduce_sum_p = Primitive("reduce_sum")
greater_p = Primitive("greater")
transpose_p = Primitive("transpose")
broadcast_p = Primitive("broadcast")

def add(x, y): return bind1(add_p, x, y)
def mul(x, y): return bind1(mul_p, x, y)
def neg(x): return bind1(neg_p, x)
def sin(x): return bind1(sin_p, x)
def cos(x): return bind1(cos_p, x)
def reduce_sum(x, axis=None): return bind1(reduce_sum_p, x, axis=axis)
def greater(x, y): return bind1(greater_p, x, y)
# BUG FIX: the original wrapper was `bind1(transpose_p, perm=perm)`, which
# dropped the array operand `x` entirely — every call to transpose() would
# fail inside bind (no positional args to trace).  Array data goes in as a
# positional argument, metadata (`perm`) as a keyword, per the convention
# described below.
def transpose(x, perm): return bind1(transpose_p, x, perm=perm)
def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes)

def bind1(prim, *args, **params):
  # Convenience wrapper for the common single-output case.
  out, = bind(prim, *args, **params)
  return out
# -

# We'll set up array data types and infix operator methods in a moment.
#
# A `Primitive` is just an object with a name, to which we attach our
# interpretation rules (one for each transformation).
# The `bind` function is our
# interception point: it'll figure out which transformation rule to apply, based
# on how the arguments are boxed in tracers and what interpreters are active.
#
# The functions that user code calls, like `add` and `sin`, are just wrappers
# around calls to `bind`. These wrappers let us control how arguments are passed
# to `bind`, and in particular we follow a handy internal convention: when we
# call `bind`, we pass values representing array data as positional arguments,
# and we pass metadata like the `axis` argument to `sum_p` via keyword. This
# calling convention simplifies some core logic (since e.g. instances of the
# `Tracer` class to be defined below can only occur in positional arguments to
# `bind`). The wrappers can also provide docstrings!
#
# We represent active interpreters as a stack. The stack is just a simple
# `list`, and each element is a container with an integer level (corresponding
# to the element's height in the stack), an interpreter type (which we'll call a
# `trace_type`), and an optional field for any global data the interpreter
# needs. We call each element a `MainTrace`, though maybe "Interpreter" would be
# more descriptive.

# +
from contextlib import contextmanager
from typing import Type, List, Optional, Any

class MainTrace(NamedTuple):
  # One stack entry per active interpreter.
  level: int                     # height in trace_stack; also its list index
  trace_type: Type['Trace']
  global_data: Optional[Any]     # e.g. the axis size for vmap

# LIFO stack of active interpreters; entry 0 is the evaluation interpreter.
trace_stack: List[MainTrace] = []
dynamic_trace: Optional[MainTrace] = None  # to be employed in Part 3

@contextmanager
def new_main(trace_type: Type['Trace'], global_data=None):
  """Push a new interpreter for the duration of the `with` block."""
  level = len(trace_stack)
  main = MainTrace(level, trace_type, global_data)
  trace_stack.append(main)
  try:
    yield main
  finally:
    # Always pop, even if the transformed function raises.
    trace_stack.pop()
# -

# When we're about to apply a transformation, we'll push another interpreter
# onto the stack using `new_main`. Then, as we apply primitives in the function,
# we can think of the `bind` first being interpreted by the trace at the top of
# the stack (i.e. with the highest level). If that first interpreter itself
# binds other primitives in its interpretation rule for the primitive, like how
# the JVP rule of `sin_p` might bind `cos_p` and `mul_p`, then those `bind`
# calls will be handled by the interpreter at the next level down.
#
# What goes at the bottom of the interpreter stack? At the bottom, we know all
# the transformation interpreters are finished, and we just want to do standard
# evaluation. So at the bottom we'll put an evaluation interpreter.
#
# Let's sketch out the interface for interpreters, which is based on the `Trace`
# and `Tracer` base classes. A `Tracer` represents a boxed-up value, perhaps
# carrying some extra context data used by the interpreter. A `Trace` handles
# boxing up values into `Tracers` and also handles primitive application.

class Trace:
  """Abstract interpreter interface; one concrete subclass per transformation."""
  main: MainTrace

  def __init__(self, main: MainTrace) -> None:
    self.main = main

  def pure(self, val):
    # Box a non-Tracer constant into this trace's Tracer type.
    assert False  # must override

  def lift(self, val):
    # Box a Tracer from a lower-level interpreter into this trace's Tracer.
    assert False  # must override

  def process_primitive(self, primitive, tracers, params):
    # Apply this transformation's rule for `primitive`.
    assert False  # must override

# The first two methods are about boxing up values in `Tracer`s, which are the
# objects that flow through the Python programs we transform. The last method is
# the callback we'll use to interpret primitive application.
#
# The `Trace` itself doesn't contain any data, other than a reference to its
# corresponding `MainTrace` instance. In fact, multiple instances of a `Trace`
# might be created and discarded during an application of a transformation,
# whereas only a single `MainTrace` instance is created per application of a
# transformation.
#
# As for `Tracer`s themselves, each one carries an abstract value (and forwards
# infix operators to it), and the rest is up to the transformation. (The
# relationship between `Tracer`s and `AbstractValue`s is that there's one
# `Tracer` per transformation, and at least one `AbstractValue` per base type,
# like arrays.)
# +
import numpy as np
from typing import Tuple

class Tracer:
  """Boxed value flowing through a transformed program.

  Forwards infix operators to its abstract value (`aval`), which dispatches
  back into the primitive wrappers (add, mul, ...) so that operations on
  tracers re-enter `bind`.
  """
  _trace: Trace

  # Make NumPy defer binary ops to us instead of trying to coerce the Tracer.
  __array_priority__ = 1000

  @property
  def aval(self):
    assert False  # must override

  def full_lower(self):
    return self  # default implementation

  def __neg__(self): return self.aval._neg(self)
  def __add__(self, other): return self.aval._add(self, other)
  def __radd__(self, other): return self.aval._radd(self, other)
  def __mul__(self, other): return self.aval._mul(self, other)
  def __rmul__(self, other): return self.aval._rmul(self, other)
  def __gt__(self, other): return self.aval._gt(self, other)
  def __bool__(self): return self.aval._bool(self)
  def __nonzero__(self): return self.aval._nonzero(self)

  def __getattr__(self, name):
    # Fall back to the abstract value for attributes like .shape / .ndim.
    try:
      return getattr(self.aval, name)
    except AttributeError:
      raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")

def swap(f): return lambda x, y: f(y, x)  # for the reflected (r-) operators


# +
class ShapedArray:
  """Abstract value: the set of all arrays with a given shape and dtype."""
  array_abstraction_level = 1
  shape: Tuple[int]
  dtype: np.dtype

  def __init__(self, shape, dtype):
    self.shape = shape
    self.dtype = dtype

  @property
  def ndim(self):
    return len(self.shape)

  # Operator implementations the Tracer forwards to; they re-enter `bind`
  # via the primitive wrapper functions.
  _neg = staticmethod(neg)
  _add = staticmethod(add)
  _radd = staticmethod(swap(add))
  _mul = staticmethod(mul)
  _rmul = staticmethod(swap(mul))
  _gt = staticmethod(greater)

  @staticmethod
  def _bool(tracer):
    raise Exception("ShapedArray can't be unambiguously converted to bool")

  @staticmethod
  def _nonzero(tracer):
    raise Exception("ShapedArray can't be unambiguously converted to bool")

  def str_short(self):
    return f'{self.dtype.name}[{",".join(str(d) for d in self.shape)}]'

  def __hash__(self):
    return hash((self.shape, self.dtype))

  def __eq__(self, other):
    return (type(self) is type(other) and
            self.shape == other.shape and self.dtype == other.dtype)

  def __repr__(self):
    return f"ShapedArray(shape={self.shape}, dtype={self.dtype})"


class ConcreteArray(ShapedArray):
  """Abstract value: a singleton set holding one concrete array value."""
  array_abstraction_level = 2
  val: np.ndarray

  def __init__(self, val):
    self.val = val
    self.shape = val.shape
    self.dtype = val.dtype

  # Unlike ShapedArray, a concrete value CAN be converted to bool.
  @staticmethod
  def _bool(tracer):
    return bool(tracer.aval.val)

  @staticmethod
  def _nonzero(tracer):
    return bool(tracer.aval.val)


def get_aval(x):
  """Return the abstract value for a Tracer or a raw jax-typed value."""
  if isinstance(x, Tracer):
    return x.aval
  elif type(x) in jax_types:
    return ConcreteArray(np.asarray(x))
  else:
    raise TypeError(x)

# Python / NumPy types accepted as raw (untraced) inputs.
jax_types = {bool, int, float,
             np.bool_, np.int32, np.int64, np.float32, np.float64, np.ndarray}
# -

# Notice that we actually have two `AbstractValue`s for arrays, representing
# different levels of abstraction. A `ShapedArray` represents the set of all
# possible arrays with a given shape and dtype. A `ConcreteArray` represents a
# singleton set consisting of a single array value.
#
# Now that we've set up the interpreter stack, the Trace/Tracer API for
# interpreters, and abstract values, we can come back to implement `bind`:

def bind(prim, *args, **params):
  top_trace = find_top_trace(args)
  tracers = [full_raise(top_trace, arg) for arg in args]
  outs = top_trace.process_primitive(prim, tracers, params)
  return [full_lower(out) for out in outs]

# The main action is that we call `find_top_trace` to figure out which
# interpreter should handle this primitive application. We then call that top
# trace's `process_primitive` so that the trace can apply its interpretation
# rule. The calls to `full_raise` just ensure that the inputs are boxed in the
# top trace's `Tracer` instances, and the call to `full_lower` is an optional
# optimization so that we unbox values out of `Tracer`s as much as possible.
# + import operator as op def find_top_trace(xs) -> Trace: top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)), default=trace_stack[0], key=op.attrgetter('level')) if dynamic_trace and dynamic_trace.level > top_main.level: top_main = dynamic_trace return top_main.trace_type(top_main) # - # In words, ignoring the `dynamic_trace` step until Part 3, `find_top_trace` # returns the highest-level interpreter associated with the `Tracer`s on its # inputs, and otherwise returns the interpreter at the bottom of the stack # (which is always an evaluation trace, at least for now). This is a deviation # from the description above, where we always start by running the interpreter # at the top of the stack and then work our way down, applying every interpreter # in the stack. Instead, we're only applying an interpreter when the input # arguments to a primitive bind are boxed in a `Tracer` corresponding to that # interpreter. This optimization lets us skip irrelevant transformations, but # bakes in an assumption that transformations mostly follow data dependence # (except for the special bottom-of-the-stack interpreter, which interprets # everything). # # An alternative would be to have every interpreter in the stack interpret every # operation. That's worth exploring! JAX is designed around data dependence in # large part because that's so natural for automatic differentiation, and JAX's # roots are in autodiff. But it may be over-fit. 
# + def full_lower(val: Any): if isinstance(val, Tracer): return val.full_lower() else: return val def full_raise(trace: Trace, val: Any) -> Tracer: if not isinstance(val, Tracer): assert type(val) in jax_types return trace.pure(val) level = trace.main.level if val._trace.main is trace.main: return val elif val._trace.main.level < level: return trace.lift(val) elif val._trace.main.level > level: raise Exception(f"Can't lift level {val._trace.main.level} to {level}.") else: # val._trace.level == level raise Exception(f"Different traces at same level: {val._trace}, {trace}.") # - # The logic in `full_raise` serves to box values into `Tracer`s for a particular # `Trace`, calling different methods on the `Trace` based on context: # `Trace.pure` is called on non-`Tracer` constants, and `Trace.lift` is called # for values that are already `Tracer`s from a lower-level interpreter. These # two methods could share the same implementation, but by distinguishing them in # the core logic we can provide more information to the `Trace` subclass. # # That's it for the JAX core! Now we can start adding interpreters. # ### Evaluation interpreter # # We'll start with the simplest interpreter: the evaluation interpreter that # will sit at the bottom of the interpreter stack. 
# + class EvalTrace(Trace): pure = lift = lambda self, x: x # no boxing in Tracers needed def process_primitive(self, primitive, tracers, params): return impl_rules[primitive](*tracers, **params) trace_stack.append(MainTrace(0, EvalTrace, None)) # special bottom of the stack # NB: in JAX, instead of a dict we attach impl rules to the Primitive instance impl_rules = {} impl_rules[add_p] = lambda x, y: [np.add(x, y)] impl_rules[mul_p] = lambda x, y: [np.multiply(x, y)] impl_rules[neg_p] = lambda x: [np.negative(x)] impl_rules[sin_p] = lambda x: [np.sin(x)] impl_rules[cos_p] = lambda x: [np.cos(x)] impl_rules[reduce_sum_p] = lambda x, *, axis: [np.sum(x, axis)] impl_rules[greater_p] = lambda x, y: [np.greater(x, y)] impl_rules[transpose_p] = lambda x, *, perm: [np.transpose(x, perm)] def broadcast_impl(x, *, shape, axes): return [np.broadcast_to(np.expand_dims(x, axes), shape)] impl_rules[broadcast_p] = broadcast_impl # - # With this interpreter, we can evaluate user functions: # + def f(x): y = sin(x) * 2. z = - y + x return z print(f(3.0)) # - # Woo! Like going around in a big circle. But the point of this indirection is # that now we can add some real transformations. # ### Forward-mode autodiff with `jvp` # # First, a few helper functions: # + def zeros_like(val): return np.zeros_like(val) def unzip2(pairs): lst1, lst2 = [], [] for x1, x2 in pairs: lst1.append(x1) lst2.append(x2) return lst1, lst2 map_ = map def map(f, *xs): return list(map_(f, *xs)) # - # The `Tracer` for forward-mode autodiff carries a primal-tangent pair. The # `Trace` applies JVP rules. 
# +
class JVPTracer(Tracer):
  """Tracer carrying a (primal, tangent) pair for forward-mode autodiff."""
  def __init__(self, trace, primal, tangent):
    self._trace = trace
    self.primal = primal
    self.tangent = tangent

  @property
  def aval(self):
    # The abstract value is that of the primal; tangents mirror its shape.
    return get_aval(self.primal)

class JVPTrace(Trace):
  # Constants get a zero tangent.
  pure = lift = lambda self, val: JVPTracer(self, val, zeros_like(val))

  def process_primitive(self, primitive, tracers, params):
    primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
    jvp_rule = jvp_rules[primitive]
    primal_outs, tangent_outs = jvp_rule(primals_in, tangents_in, **params)
    return [JVPTracer(self, x, t) for x, t in zip(primal_outs, tangent_outs)]

jvp_rules = {}
# -

# Notice both `pure` and `lift` package a value into a `JVPTracer` with the
# minimal amount of context, which is a zero tangent value.
#
# Let's add some JVP rules for primitives:

# +
def add_jvp(primals, tangents):
  (x, y), (x_dot, y_dot) = primals, tangents
  return [x + y], [x_dot + y_dot]
jvp_rules[add_p] = add_jvp

def mul_jvp(primals, tangents):
  # Product rule.
  (x, y), (x_dot, y_dot) = primals, tangents
  return [x * y], [x_dot * y + x * y_dot]
jvp_rules[mul_p] = mul_jvp

def sin_jvp(primals, tangents):
  (x,), (x_dot,) = primals, tangents
  return [sin(x)], [cos(x) * x_dot]
jvp_rules[sin_p] = sin_jvp

def cos_jvp(primals, tangents):
  (x,), (x_dot,) = primals, tangents
  return [cos(x)], [-sin(x) * x_dot]
jvp_rules[cos_p] = cos_jvp

def neg_jvp(primals, tangents):
  (x,), (x_dot,) = primals, tangents
  return [neg(x)], [neg(x_dot)]
jvp_rules[neg_p] = neg_jvp

def reduce_sum_jvp(primals, tangents, *, axis):
  (x,), (x_dot,) = primals, tangents
  return [reduce_sum(x, axis)], [reduce_sum(x_dot, axis)]
jvp_rules[reduce_sum_p] = reduce_sum_jvp

def greater_jvp(primals, tangents):
  # Comparison output is non-differentiable: tangent is zero.
  (x, y), _ = primals, tangents
  out_primal = greater(x, y)
  return [out_primal], [zeros_like(out_primal)]
jvp_rules[greater_p] = greater_jvp
# -

# Finally, we add a transformation API to kick off the trace:

def jvp_v1(f, primals, tangents):
  """v1 API: single-output functions with array positional args only."""
  with new_main(JVPTrace) as main:
    trace = JVPTrace(main)
    tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
    out = f(*tracers_in)
    tracer_out = full_raise(trace, out)
    primal_out, tangent_out = tracer_out.primal, tracer_out.tangent
  return primal_out, tangent_out

# And with that, we can differentiate!

x = 3.0
y, sin_deriv_at_3 = jvp_v1(sin, (x,), (1.0,))
print(sin_deriv_at_3)
print(cos(3.0))

# +
def f(x):
  y = sin(x) * 2.
  z = - y + x
  return z

x, xdot = 3., 1.
y, ydot = jvp_v1(f, (x,), (xdot,))
print(y)
print(ydot)

# +
def deriv(f):
  return lambda x: jvp_v1(f, (x,), (1.,))[1]

print(deriv(sin)(3.))
print(deriv(deriv(sin))(3.))
print(deriv(deriv(deriv(sin)))(3.))
print(deriv(deriv(deriv(deriv(sin))))(3.))

# +
def f(x):
  if x > 0.:  # Python control flow
    return 2. * x
  else:
    return x

print(deriv(f)(3.))
print(deriv(f)(-3.))
# -

# ## Pytrees and flattening user functions' inputs and outputs

# A limitation with `jvp_v1` is that it assumes the user function accepts arrays
# as positional arguments and produces a single array as output. What if it
# produced a list as output? Or accepted nested containers as inputs? It would
# be a pain to deal with all the possible containers in inputs and outputs at
# every layer of the stack. Instead, we can wrap the user function so that the
# wrapped version accepts arrays as inputs and returns a flat list of arrays as
# output. The wrapper just needs to unflatten its input, call the user function,
# and flatten the output.
# # Here's how we'd like to write `jvp`, assuming the user always gives us # functions that take arrays as inputs and produces a flat list of arrays as # outputs: def jvp_flat(f, primals, tangents): with new_main(JVPTrace) as main: trace = JVPTrace(main) tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)] outs = f(*tracers_in) tracers_out = [full_raise(trace, out) for out in outs] primals_out, tangents_out = unzip2((t.primal, t.tangent) for t in tracers_out) return primals_out, tangents_out # To support user functions that have arbitrary containers in the inputs and # outputs, here's how we'd write the user-facing `jvp` wrapper: def jvp(f, primals, tangents): primals_flat, in_tree = tree_flatten(primals) tangents_flat, in_tree2 = tree_flatten(tangents) if in_tree != in_tree2: raise TypeError f, out_tree = flatten_fun(f, in_tree) primals_out_flat, tangents_out_flat = jvp_flat(f, primals_flat, tangents_flat) primals_out = tree_unflatten(out_tree(), primals_out_flat) tangents_out = tree_unflatten(out_tree(), tangents_out_flat) return primals_out, tangents_out # Notice that we had to plumb the tree structure of the user function output # back to the caller of `flatten_fun`. That information isn't available until we # actually run the user function, so `flatten_fun` just returns a reference to a # mutable cell, represented as a thunk. These side-effects are safe because we # always run the user function exactly once. (This safe regime is the reason for # the "linear" name in `linear_util.py`, in the sense of [linear # types](https://en.wikipedia.org/wiki/Substructural_type_system).) # # All that remains is to write `tree_flatten`, `tree_unflatten`, and # `flatten_fun`. 
# + tags=["hide-input"]
def flatten_fun(f, in_tree):
  """Wrap f so it maps flat arg lists to flat output lists.

  Returns the wrapped function plus a Store that, once the wrapped
  function has run, holds the output tree structure.
  """
  store = Store()

  def flat_fun(*args_flat):
    pytree_args = tree_unflatten(in_tree, args_flat)
    out = f(*pytree_args)
    out_flat, out_tree = tree_flatten(out)
    store.set_value(out_tree)
    return out_flat

  return flat_fun, store

class Empty: pass
empty = Empty()  # sentinel meaning "no value stored yet"

class Store:
  """Write-once mutable cell, readable as a thunk via __call__."""
  val = empty

  def set_value(self, val):
    assert self.val is empty  # write-once: f is run exactly once
    self.val = val

  def __call__(self):
    return self.val


# + tags=["hide-input"]
import itertools as it
from typing import Callable, Type, Hashable, Dict, Iterable, Iterator

class NodeType(NamedTuple):
  # How to take a container apart and put it back together.
  name: str
  to_iterable: Callable
  from_iterable: Callable

def register_pytree_node(ty: Type, to_iter: Callable, from_iter: Callable
                         ) -> None:
  node_types[ty] = NodeType(str(ty), to_iter, from_iter)

node_types: Dict[Type, NodeType] = {}
register_pytree_node(tuple, lambda t: (None, t), lambda _, xs: tuple(xs))
register_pytree_node(list, lambda l: (None, l), lambda _, xs: list(xs))
# dicts are flattened in sorted-key order; the keys become node metadata
register_pytree_node(dict,
                     lambda d: map(tuple, unzip2(sorted(d.items()))),
                     lambda keys, vals: dict(zip(keys, vals)))

class PyTreeDef(NamedTuple):
  node_type: NodeType
  node_metadata: Hashable
  child_treedefs: Tuple['PyTreeDef']

class Leaf: pass
leaf = Leaf()  # sentinel treedef for leaf values

def tree_flatten(x: Any) -> Tuple[List[Any], PyTreeDef]:
  """Flatten a pytree into a list of leaves plus its structure."""
  children_iter, treedef = _tree_flatten(x)
  return list(children_iter), treedef

def _tree_flatten(x: Any) -> Tuple[Iterable, PyTreeDef]:
  node_type = node_types.get(type(x))
  if node_type:
    node_metadata, children = node_type.to_iterable(x)
    children_flat, child_trees = unzip2(map(_tree_flatten, children))
    flattened = it.chain.from_iterable(children_flat)
    return flattened, PyTreeDef(node_type, node_metadata, tuple(child_trees))
  else:
    # Unregistered type: treat as a leaf.
    return [x], leaf

def tree_unflatten(treedef: PyTreeDef, xs: List[Any]) -> Any:
  """Rebuild a pytree from its structure and a flat list of leaves."""
  return _tree_unflatten(treedef, iter(xs))

def _tree_unflatten(treedef: PyTreeDef, xs: Iterator) -> Any:
  if treedef is leaf:
    return next(xs)
  else:
    children = (_tree_unflatten(t, xs) for t in treedef.child_treedefs)
    return treedef.node_type.from_iterable(treedef.node_metadata, children)
# -

# With this pytree-handling `jvp` implementation, we can now handle arbitrary
# input and output containers. That'll come in handy with future transformations
# too!

# +
def f(x):
  y = sin(x) * 2.
  z = - y + x
  return {'hi': z, 'there': [x, y]}

x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -

# ### Vectorized batching with `vmap`
#
# First, a couple helper functions, one for producing mapped abstract values
# from unmapped ones (by removing an axis), and one for moving batch dimensions
# around:

# +
def mapped_aval(batch_dim, aval):
  # Remove the batch axis from the abstract shape.
  shape = list(aval.shape)
  del shape[batch_dim]
  return ShapedArray(tuple(shape), aval.dtype)

def move_batch_axis(axis_size, src, dst, x):
  if src is not_mapped:
    # Unbatched value: materialize the batch axis by broadcasting.
    target_shape = list(np.shape(x))
    target_shape.insert(dst, axis_size)
    return broadcast(x, target_shape, [dst])
  elif src == dst:
    return x
  else:
    return moveaxis(x, src, dst)

def moveaxis(x, src: int, dst: int):
  perm = [i for i in range(np.ndim(x)) if i != src]
  perm.insert(dst, src)
  return transpose(x, perm)
# -

# The `Tracer` for vectorized batching carries a batched value and an optional
# integer indicating which axis (if any) is the batch axis.
# + from typing import Union class NotMapped: pass not_mapped = NotMapped() BatchAxis = Union[NotMapped, int] class BatchTracer(Tracer): def __init__(self, trace, val, batch_dim: BatchAxis): self._trace = trace self.val = val self.batch_dim = batch_dim @property def aval(self): if self.batch_dim is not_mapped: return get_aval(self.val) else: return mapped_aval(self.batch_dim, get_aval(self.val)) def full_lower(self): if self.batch_dim is not_mapped: return full_lower(self.val) else: return self class BatchTrace(Trace): pure = lift = lambda self, val: BatchTracer(self, val, not_mapped) def process_primitive(self, primitive, tracers, params): vals_in, bdims_in = unzip2((t.val, t.batch_dim) for t in tracers) vmap_rule = vmap_rules[primitive] val_outs, bdim_outs = vmap_rule(self.axis_size, vals_in, bdims_in, **params) return [BatchTracer(self, x, bd) for x, bd in zip(val_outs, bdim_outs)] @property def axis_size(self): return self.main.global_data vmap_rules = {} # - # Here we've implemented the optional `Tracer.full_lower` method, which lets us # peel off a batching tracer if it's not needed because it doesn't represent a # batched value. # # For `BatchTrace`, analogous to `JVPTrace`, the methods `pure` and `lift` just # box a value in a `BatchTracer` with the minimal amount of context, which in # this case is a `batch_dim` taking the sentinel value `not_mapped`. Notice we # use the `MainTrace`'s interpreter-global data field to store the batch axis # size. 
#
# Next we can define batching interpreter rules for each primitive:

# +
from functools import partial

def broadcasting_binop_batching_rule(op, axis_size, vals_in, dims_in):
  # Batching rule for broadcasting binary ops: line the two operands' batch
  # axes up on the same position, apply the op once, and report where the
  # result's batch axis lives.
  (x, y), (x_bdim, y_bdim) = vals_in, dims_in
  if x_bdim != y_bdim:
    if x_bdim is not_mapped:
      x = move_batch_axis(axis_size, x_bdim, y_bdim, x)
      # BUG FIX: after broadcasting x onto y's batch axis the result is
      # batched at y_bdim; without this update we'd report `not_mapped` for a
      # batched output whenever only the second operand was batched.
      x_bdim = y_bdim
    else:
      y = move_batch_axis(axis_size, y_bdim, x_bdim, y)
  return [op(x, y)], [x_bdim]

vmap_rules[add_p] = partial(broadcasting_binop_batching_rule, add)
vmap_rules[mul_p] = partial(broadcasting_binop_batching_rule, mul)

def vectorized_unop_batching_rule(op, axis_size, vals_in, dims_in):
  # Elementwise unary ops commute with batching: apply and keep the batch dim.
  (x,), (x_bdim,) = vals_in, dims_in
  return [op(x)], [x_bdim]

vmap_rules[sin_p] = partial(vectorized_unop_batching_rule, sin)
vmap_rules[cos_p] = partial(vectorized_unop_batching_rule, cos)
vmap_rules[neg_p] = partial(vectorized_unop_batching_rule, neg)

def reduce_sum_batching_rule(axis_size, vals_in, dims_in, *, axis):
  (x,), (x_bdim,) = vals_in, dims_in
  # Shift the reduction axis right if the batch axis precedes it, then shift
  # the batch axis left if the (shifted) reduction axis precedes it.
  new_axis = axis + (x_bdim <= axis)
  out_bdim = x_bdim - (new_axis < x_bdim)
  return [reduce_sum(x, new_axis)], [out_bdim]
vmap_rules[reduce_sum_p] = reduce_sum_batching_rule
# -

# Finally, we add a transformation API to kick off the trace:

# +
def vmap_flat(f, in_axes, *args):
  # All mapped arguments must agree on the mapped axis size; the set
  # comprehension + single-element unpack asserts exactly that.
  axis_size, = {x.shape[ax] for x, ax in zip(args, in_axes)
                if ax is not not_mapped}
  with new_main(BatchTrace, axis_size) as main:
    trace = BatchTrace(main)
    # NOTE(review): unmapped arguments are detected with `ax is not None`
    # here but with `ax is not not_mapped` above — confirm which sentinel
    # callers are expected to use for "unmapped".
    tracers_in = [BatchTracer(trace, x, ax) if ax is not None else x
                  for x, ax in zip(args, in_axes)]
    outs = f(*tracers_in)
    tracers_out = [full_raise(trace, out) for out in outs]
    vals_out, bdims_out = unzip2((t.val, t.batch_dim) for t in tracers_out)
  # Canonicalize every output's batch axis to position 0.
  outs_transposed = [move_batch_axis(axis_size, bdim, 0, val_out)
                     for val_out, bdim in zip(vals_out, bdims_out)]
  return outs_transposed

def vmap(f, in_axes):
  """Vectorizing transformation: map `f` over axis `in_axes` of its inputs."""
  def batched_f(*args):
    args_flat, in_tree = tree_flatten(args)
    in_axes_flat, in_tree2 = tree_flatten(in_axes)
    if in_tree != in_tree2: raise TypeError
    f_flat, out_tree = flatten_fun(f, in_tree)
    outs_flat = vmap_flat(f_flat, in_axes_flat, *args_flat)
    return tree_unflatten(out_tree(), outs_flat)
  return batched_f
# -

# +
def add_one_to_a_scalar(scalar):
  assert np.ndim(scalar) == 0
  return 1 + scalar

vector_in = np.arange(3.)
vector_out = vmap(add_one_to_a_scalar, (0,))(vector_in)

print(vector_in)
print(vector_out)
# -

# +
def jacfwd(f, x):
  # Forward-mode Jacobian: push each standard basis vector through jvp, all
  # at once via vmap.
  pushfwd = lambda v: jvp(f, (x,), (v,))[1]
  vecs_in = np.eye(np.size(x)).reshape(np.shape(x) * 2)
  return vmap(pushfwd, (0,))(vecs_in)

def f(x):
  return sin(x)

jacfwd(f, np.arange(3.))
# -

# That's it for `jvp` and `vmap`!

# ## Part 2: Jaxprs
#
# The next transformations on the horizon are `jit` for just-in-time
# compilation and `vjp` for reverse-mode autodiff. (`grad` is just a small
# wrapper around `vjp`.) Whereas `jvp` and `vmap` only needed each `Tracer` to
# carry a little bit of extra context, for both `jit` and `vjp` we need much
# richer context: we need to represent _programs_. That is, we need jaxprs!
#
# Jaxprs are JAX's internal intermediate representation of programs. They are
# explicitly typed, functional, first-order, and in ANF form. We need a
# program representation for `jit` because the purpose of `jit` is to stage
# computation out of Python. For any computation we want to stage out, we need
# to be able to represent it as data, and build it up as we trace a Python
# function. Similarly, `vjp` needs a way to represent the computation for the
# backward pass of reverse-mode autodiff. We use the same jaxpr program
# representation for both needs.
#
# (Building a program representation is the most
# [free](https://en.wikipedia.org/wiki/Free_object) kind of
# trace-transformation, and so except for issues around handling native Python
# control flow, any transformation could be implemented by first tracing to a
# jaxpr and then interpreting the jaxpr.)

# ### Jaxpr data structures
#
# The jaxpr term syntax is roughly:
#
# ```
# jaxpr ::=
#   { lambda <binder> , ... .
#     let <eqn>
#         ...
#     in ( <atom> , ... ) }
#
# binder ::= <var>:<array_type>
# var ::= a | b | c | ...
# atom ::= <var> | <literal>
# literal ::= <int32> | <int64> | <float32> | <float64>
#
# eqn ::= <binder> , ... = <primitive> [ <params> ] <atom> , ...
# ```
#
# The syntax of types is:
#
# ```
# jaxpr_type ::= [ <array_type> , ... ] -> [ <array_type> , ... ]
# array_type ::= <dtype>[<shape>]
# dtype ::= f32 | f64 | i32 | i64
# shape ::= <int> , ...
# ```
#
# How do we represent these as Python data structures? We reuse ShapedArrays to
# represent types, and we can represent the term syntax with a few Python
# structs:

# +
from typing import Set

class Var:
  # A jaxpr variable: identity-based, carrying only its abstract type.
  aval: ShapedArray
  def __init__(self, aval): self.aval = aval

class Lit:
  # A literal constant appearing inline in a jaxpr.
  val: Any
  aval: ShapedArray

  def __init__(self, val):
    self.val = val
    self.aval = raise_to_shaped(get_aval(self.val))

Atom = Union[Var, Lit]

class JaxprEqn(NamedTuple):
  primitive: Primitive
  inputs: List[Atom]
  params: Dict[str, Any]
  out_binders: List[Var]

class Jaxpr(NamedTuple):
  in_binders: List[Var]
  eqns: List[JaxprEqn]
  outs: List[Atom]

  # Jaxprs hash and compare by identity so they can serve as cache keys.
  def __hash__(self): return id(self)
  __eq__ = op.is_

def raise_to_shaped(aval):
  # Forget any concrete value, keeping only shape and dtype.
  return ShapedArray(aval.shape, aval.dtype)
# -

# Type-checking a jaxpr involves checking that there are no unbound variables,
# that variables are only bound once, and that for each equation the type of
# the primitive application matches the type of the output binders.
# +
class JaxprType(NamedTuple):
  # The type of a whole jaxpr: input avals -> output avals.
  in_types: List[ShapedArray]
  out_types: List[ShapedArray]

  def __repr__(self):
    in_types = ', '.join(aval.str_short() for aval in self.in_types)
    out_types = ', '.join(aval.str_short() for aval in self.out_types)
    return f'({in_types}) -> ({out_types})'

def typecheck_jaxpr(jaxpr: Jaxpr) -> JaxprType:
  """Check scoping and per-equation types; return the jaxpr's overall type.

  Raises TypeError on an unbound variable, a doubly-bound variable, or an
  equation whose output binder types disagree with its abstract-eval rule.
  """
  env: Set[Var] = set()

  for v in jaxpr.in_binders:
    if v in env: raise TypeError
    env.add(v)

  for eqn in jaxpr.eqns:
    in_types = [typecheck_atom(env, x) for x in eqn.inputs]
    # The abstract-eval rule plays the role of the primitive's typing rule.
    out_types = abstract_eval_rules[eqn.primitive](*in_types, **eqn.params)
    for out_binder, out_type in zip(eqn.out_binders, out_types):
      if not out_type == out_binder.aval: raise TypeError
    for out_binder in eqn.out_binders:
      if out_binder in env: raise TypeError
      env.add(out_binder)

  in_types = [v.aval for v in jaxpr.in_binders]
  out_types = [typecheck_atom(env, x) for x in jaxpr.outs]
  return JaxprType(in_types, out_types)

def typecheck_atom(env: Set[Var], x: Atom) -> ShapedArray:
  # Variables must already be bound; literals carry their own type.
  if isinstance(x, Var):
    if x not in env: raise TypeError("unbound variable")
    return x.aval
  elif isinstance(x, Lit):
    return raise_to_shaped(get_aval(x.val))
  else:
    assert False
# -

# We can apply the function represented by a jaxpr to arguments with a simple
# interpreter.

# +
def eval_jaxpr(jaxpr: Jaxpr, args: List[Any]) -> List[Any]:
  # NOTE: `map` here is this file's shadowed, list-returning strict map, so
  # the write/read passes below execute eagerly.
  env: Dict[Var, Any] = {}

  def read(x: Atom) -> Any:
    return env[x] if type(x) is Var else x.val

  def write(v: Var, val: Any) -> None:
    assert v not in env  # single-assignment
    env[v] = val

  map(write, jaxpr.in_binders, args)
  for eqn in jaxpr.eqns:
    in_vals = map(read, eqn.inputs)
    # Using `bind` makes this interpreter itself traceable.
    outs = bind(eqn.primitive, *in_vals, **eqn.params)
    map(write, eqn.out_binders, outs)
  return map(read, jaxpr.outs)

def jaxpr_as_fun(jaxpr: Jaxpr):
  return lambda *args: eval_jaxpr(jaxpr, args)
# -

# By using `bind` in the interpreter, this interpreter itself is traceable.

# ### Building jaxprs with tracing
#
# Now that we have jaxprs as a data structure, we need ways to produce these
# from tracing Python code. In general there are two variants of how we trace to
# a jaxpr; `jit` uses one and `vjp` uses the other. We'll start with the one
# used by `jit`, which is also used by control flow primitives like `lax.cond`,
# `lax.while_loop`, and `lax.scan`.

# +
# NB: the analogous class in JAX is called 'DynamicJaxprTracer'
class JaxprTracer(Tracer):
  # A tracer that carries only an abstract value; the actual "value" is the
  # program being built in the JaxprBuilder.
  __slots__ = ['aval']
  aval: ShapedArray

  def __init__(self, trace, aval):
    self._trace = trace
    self.aval = aval

# NB: the analogous class in JAX is called 'DynamicJaxprTrace'
class JaxprTrace(Trace):
  def new_arg(self, aval: ShapedArray) -> JaxprTracer:
    # Introduce a fresh input binder for the jaxpr being built.
    aval = raise_to_shaped(aval)
    tracer = self.builder.new_tracer(self, aval)
    self.builder.tracer_to_var[id(tracer)] = Var(aval)
    return tracer

  def get_or_make_const_tracer(self, val: Any) -> JaxprTracer:
    # Deduplicate constants by object identity.
    tracer = self.builder.const_tracers.get(id(val))
    if tracer is None:
      tracer = self.builder.new_tracer(self, raise_to_shaped(get_aval(val)))
      self.builder.add_const(tracer, val)
    return tracer
  pure = lift = get_or_make_const_tracer

  def process_primitive(self, primitive, tracers, params):
    # Instead of computing, record an equation and return tracers typed by
    # the primitive's abstract-eval rule.
    avals_in = [t.aval for t in tracers]
    avals_out = abstract_eval_rules[primitive](*avals_in, **params)
    out_tracers = [self.builder.new_tracer(self, a) for a in avals_out]
    inputs = [self.builder.getvar(t) for t in tracers]
    outvars = [self.builder.add_var(t) for t in out_tracers]
    self.builder.add_eqn(JaxprEqn(primitive, inputs, params, outvars))
    return out_tracers

  @property
  def builder(self):
    # The in-progress-jaxpr builder lives in the MainTrace's global data slot.
    return self.main.global_data

# NB: in JAX, we instead attach abstract eval rules to Primitive instances
abstract_eval_rules = {}
# -

# Notice that we keep as interpreter-global data a builder object, which keeps
# track of variables, constants, and eqns as we build up the jaxpr.
class JaxprBuilder:
  # Mutable state accumulated while tracing to a jaxpr: equations, the
  # tracer-to-variable mapping, and deduplicated constants.
  eqns: List[JaxprEqn]
  tracer_to_var: Dict[int, Var]
  const_tracers: Dict[int, JaxprTracer]
  constvals: Dict[Var, Any]
  tracers: List[JaxprTracer]

  def __init__(self):
    self.eqns = []
    self.tracer_to_var = {}
    self.const_tracers = {}
    self.constvals = {}
    self.tracers = []

  def new_tracer(self, trace: JaxprTrace, aval: ShapedArray) -> JaxprTracer:
    tracer = JaxprTracer(trace, aval)
    self.tracers.append(tracer)
    return tracer

  def add_eqn(self, eqn: JaxprEqn) -> None:
    self.eqns.append(eqn)

  def add_var(self, tracer: JaxprTracer) -> Var:
    # Bind a fresh variable for a tracer that doesn't have one yet.
    assert id(tracer) not in self.tracer_to_var
    var = self.tracer_to_var[id(tracer)] = Var(tracer.aval)
    return var

  def getvar(self, tracer: JaxprTracer) -> Var:
    var = self.tracer_to_var.get(id(tracer))
    assert var is not None
    return var

  def add_const(self, tracer: JaxprTracer, val: Any) -> Var:
    # Constants become extra in-binders; their values are returned from build().
    var = self.add_var(tracer)
    self.const_tracers[id(val)] = tracer
    self.constvals[var] = val
    return var

  def build(self, in_tracers: List[JaxprTracer], out_tracers: List[JaxprTracer]
            ) -> Tuple[Jaxpr, List[Any]]:
    # Constants are prepended to the explicit inputs as leading binders.
    constvars, constvals = unzip2(self.constvals.items())
    t2v = lambda t: self.tracer_to_var[id(t)]
    in_binders = constvars + [t2v(t) for t in in_tracers]
    out_vars = [t2v(t) for t in out_tracers]
    jaxpr = Jaxpr(in_binders, self.eqns, out_vars)
    typecheck_jaxpr(jaxpr)  # sanity-check what we built
    return jaxpr, constvals

# The rules we need for `JaxprTrace.process_primitive` are essentially typing
# rules for primitive applications: given the primitive, its parameters, and
# types for the inputs, the rule must produce a type for the output, which is
# then packaged with the output `JaxprTracer`. We can use abstract evaluation
# rules for this same purpose, even though they can be more general (since
# abstract evaluation rules must accept ConcreteArray inputs, and since they
# need only return an upper bound on the set of possible outputs, they can
# produce ConcreteArray outputs as well). We'll reuse these abstract evaluation
# rules for the other jaxpr-producing trace machinery, where the potential extra
# generality is useful.

# +
def broadcast_shapes(*shapes):
  """NumPy-style broadcast of two or more shapes; raises on a mismatch."""
  assert len(shapes) > 1
  for sizes in zip(*shapes):
    # Within each dimension, all non-1 sizes must agree.
    sizes = [d for d in sizes if d != 1]
    if sizes[:-1] != sizes[1:]: raise Exception
  return tuple(next((d for d in sizes if d != 1), 1)
               for sizes in zip(*shapes))

def broadcasting_binop_abstract_eval_rule(*avals_in):
  out_dtype = np.result_type(*map(np.result_type, avals_in))
  out_shape = broadcast_shapes(*map(np.shape, avals_in))
  return [ShapedArray(out_shape, out_dtype)]

abstract_eval_rules[add_p] = broadcasting_binop_abstract_eval_rule
abstract_eval_rules[mul_p] = broadcasting_binop_abstract_eval_rule

def vectorized_unop_abstract_eval_rule(aval_in):
  # Elementwise unary ops preserve shape and dtype.
  return [ShapedArray(np.shape(aval_in), np.result_type(aval_in))]

abstract_eval_rules[sin_p] = vectorized_unop_abstract_eval_rule
abstract_eval_rules[cos_p] = vectorized_unop_abstract_eval_rule
abstract_eval_rules[neg_p] = vectorized_unop_abstract_eval_rule

def reduce_sum_abstract_eval_rule(aval_in, *, axis):
  new_shape = [d for i, d in enumerate(aval_in.shape) if i != axis]
  return [ShapedArray(tuple(new_shape), aval_in.dtype)]
abstract_eval_rules[reduce_sum_p] = reduce_sum_abstract_eval_rule

def broadcast_abstract_eval(x, *, shape, axes):
  return [ShapedArray(tuple(shape), np.result_type(x))]
abstract_eval_rules[broadcast_p] = broadcast_abstract_eval
# -

# To check our implementation of jaxprs, we can add a `make_jaxpr`
# transformation and a pretty-printer:

# +
from functools import lru_cache

@lru_cache()  # ShapedArrays are hashable
def make_jaxpr_v1(f, *avals_in):
  avals_in, in_tree = tree_flatten(avals_in)
  f, out_tree = flatten_fun(f, in_tree)

  builder = JaxprBuilder()
  with new_main(JaxprTrace, builder) as main:
    trace = JaxprTrace(main)
    tracers_in = [trace.new_arg(aval) for aval in avals_in]
    outs = f(*tracers_in)
    tracers_out = [full_raise(trace, out) for out in outs]
    jaxpr, consts = builder.build(tracers_in, tracers_out)
  return jaxpr, consts, out_tree()

# + tags=["hide-input"]
from collections import defaultdict
import string

class PPrint:
  # Minimal pretty-printing combinator: a list of (indent, text) lines.
  lines: List[Tuple[int, str]]

  def __init__(self, lines):
    self.lines = lines

  def indent(self, indent: int) -> 'PPrint':
    return PPrint([(indent + orig_indent, s) for orig_indent, s in self.lines])

  def __add__(self, rhs: 'PPrint') -> 'PPrint':
    # Vertical concatenation.
    return PPrint(self.lines + rhs.lines)

  def __rshift__(self, rhs: 'PPrint') -> 'PPrint':
    # Horizontal juxtaposition: rhs continues on self's last line, with
    # rhs's subsequent lines indented to line up underneath.
    if not rhs.lines: return self
    if not self.lines: return rhs
    indent, s = self.lines[-1]
    indented_block = rhs.indent(indent + len(s))
    common_line = s + ' ' * rhs.lines[0][0] + rhs.lines[0][1]
    return PPrint(self.lines[:-1]
                  + [(indent, common_line)]
                  + indented_block.lines[1:])

  def __str__(self) -> str:
    return '\n'.join(' ' * indent + s for indent, s in self.lines)

def pp(s: Any) -> PPrint:
  return PPrint([(0, line) for line in str(s).splitlines()])

def vcat(ps: List[PPrint]) -> PPrint:
  return sum(ps, pp(''))

def pp_jaxpr(jaxpr: Jaxpr):
  # Generate names a, b, ..., z, aa, ab, ... lazily as variables are printed.
  namegen = (''.join(s) for r in it.count(1)
             for s in it.permutations(string.ascii_lowercase, r))
  names = defaultdict(lambda: next(namegen))
  in_binders = ', '.join(var_str(names, x) for x in jaxpr.in_binders)
  eqns = vcat([pp_eqn(names, e) for e in jaxpr.eqns])
  outs = ', '.join(names[v] if isinstance(v, Var) else str(v.val)
                   for v in jaxpr.outs)
  return (pp(f'{{ lambda {in_binders} .') +
          ((pp('let ') >> eqns) + pp(f'in ( {outs} ) }}')).indent(2))

def var_str(names: Dict[Var, str], v: Var) -> str:
  return f'{names[v]}:{v.aval.str_short()}'

def pp_eqn(names: Dict[Var, str], eqn: JaxprEqn) -> PPrint:
  lhs = pp(' '.join(var_str(names, v) for v in eqn.out_binders))
  rhs = (pp(eqn.primitive.name) >> pp_params(eqn.params) >>
         pp(' '.join(names[x] if isinstance(x, Var) else str(x.val)
                     for x in eqn.inputs)))
  return lhs >> pp(' = ') >> rhs

def pp_params(params: Dict[str, Any]) -> PPrint:
  items = sorted(params.items())
  if items:
    return pp(' [ ') >> vcat([pp(f'{k}={v}') for k, v in items]) >> pp(' ] ')
  else:
    return pp(' ')

Jaxpr.__repr__ = lambda self: str(pp_jaxpr(self))
# -

jaxpr, consts, _ = make_jaxpr_v1(lambda x: 2. * x, raise_to_shaped(get_aval(3.)))
print(jaxpr)
print(typecheck_jaxpr(jaxpr))

# But there's a limitation here: because of how `find_top_trace` operates by
# data dependence, `make_jaxpr_v1` can't stage out all the primitive operations
# performed by the Python callable it's given. For example:

jaxpr, consts, _ = make_jaxpr_v1(lambda: mul(2., 2.))
print(jaxpr)

# This is precisely the issue that
# [omnistaging](https://github.com/google/jax/pull/3370) fixed.
# We want to ensure that the `JaxprTrace` started by `make_jaxpr` is always
# applied, regardless of whether any inputs to `bind` are boxed in corresponding
# `JaxprTracer` instances. We can achieve this by employing the `dynamic_trace`
# global defined in Part 1:

# +
@contextmanager
def new_dynamic(main: MainTrace):
  # Temporarily install `main` as the dynamic (always-applied) trace.
  global dynamic_trace
  prev_dynamic_trace, dynamic_trace = dynamic_trace, main
  try:
    yield
  finally:
    dynamic_trace = prev_dynamic_trace

@lru_cache()
def make_jaxpr(f, *avals_in):
  """Trace `f` at the given avals to a jaxpr, staging out every primitive."""
  avals_in, in_tree = tree_flatten(avals_in)
  f, out_tree = flatten_fun(f, in_tree)

  builder = JaxprBuilder()
  with new_main(JaxprTrace, builder) as main:
    with new_dynamic(main):
      trace = JaxprTrace(main)
      tracers_in = [trace.new_arg(aval) for aval in avals_in]
      outs = f(*tracers_in)
      tracers_out = [full_raise(trace, out) for out in outs]
      jaxpr, consts = builder.build(tracers_in, tracers_out)
  return jaxpr, consts, out_tree()

jaxpr, consts, _ = make_jaxpr(lambda: mul(2., 2.))
print(jaxpr)
# -

# Using `dynamic_trace` this way is conceptually the same as stashing the
# current interpreter stack and starting a new one with the `JaxprTrace` at the
# bottom. That is, no interpreters lower in the stack than the `dynamic_trace`
# are applied (since `JaxprTrace.process_primitive` doesn't call `bind`), though
# if the Python callable being traced to a jaxpr itself uses transformations
# then those can be pushed onto the interpreter stack above the `JaxprTrace`.
# But temporarily stashing the interpreter stack would break up the system
# state. The `dynamic_trace` tag achieves the same goals while keeping the
# system state simpler.

# That's it for jaxprs! With jaxprs in hand, we can implement the remaining
# major JAX features.

# ## Part 3: `jit`, simplified
#
# While `jit` has a transformation-like API in that it accepts a Python callable
# as an argument, under the hood it's really a higher-order primitive rather
# than a transformation. A primitive is _higher-order_ when it's parameterized
# by a function.

# ### On-the-fly ("final style") and staged ("initial style") processing
#
# There are two options for how to handle higher-order primitives. Each requires
# a different approach to tracing and engenders different tradeoffs:
# 1. **On-the-fly processing, where `bind` takes a Python callable as an
#    argument.** We defer forming a jaxpr until as late as possible, namely
#    until we're running the final interpreter at the bottom of the interpreter
#    stack. That way we can swap a `JaxprTrace` in at the bottom of the
#    interpreter stack and thus stage out rather than execute all primitive
#    operations. With this approach, transformations in the stack get applied as
#    we execute the Python callable as usual. This approach can be very tricky
#    to implement, but it's as general as possible because it allows
#    higher-order primitives not to raise the abstraction level of their
#    arguments and thus allows data-dependent Python control flow. We refer to
#    this approach as using a "final-style higher-order primitive" employing the
#    discharge-at-tracing-time "final-style transformations" we've used so far.
# 2. **Staged processing, where `bind` takes a jaxpr as an argument.** Before we
#    call `bind`, in the primitive wrapper we can just use `make_jaxpr` to form
#    a jaxpr up-front and be done with the Python callable entirely. In this
#    case, `make_jaxpr` puts its `JaxprTrace` at the top of the interpreter
#    stack, and no transformations lower in the stack, which might enter via
#    closed-over Tracers, are applied to the Python callable as we trace it.
#    (Transformations applied within the Python callable are applied as usual,
#    being added to the stack above the JaxprTrace.) Instead, the
#    transformations lower in the stack are later applied to the call primitive,
#    and the call primitive's rules must then transform the jaxpr itself.
#    Because we trace to a jaxpr up-front, this approach can't support
#    data-dependent Python control flow, but it is more straightforward to
#    implement. We refer to this kind of higher-order primitive as an
#    "initial-style higher-order primitive", and say that its jaxpr-processing
#    transformation rules are "initial-style transformation rules."
#
# The latter approach fits for `jit` because we don't need to support
# data-dependent Python control flow in the user-provided Python callable, as
# the whole purpose of `jit` is to stage computation out of Python to be
# executed by XLA. (In contrast, `custom_jvp` is a higher-order primitive in
# which we want to support data-dependent Python control flow.)
#
# Historically, we started using the "initial-style" and "final-style"
# terminology after reading the [typed tagless final
# interpreters](http://okmij.org/ftp/tagless-final/index.html) paper, and
# jokingly referring to JAX as an implementation of "untyped tagful final
# interpreters." We don't claim to carry over (or understand) any deep meaning
# behind these terms; we loosely use "initial style" to mean "build an AST and
# then transform it", and we use "final style" to mean "transform as we trace."
# But it's just imprecise yet sticky jargon. # With the initial-style approach, here's the user-facing `jit` wrapper: # + def jit(f): def f_jitted(*args): avals_in = [raise_to_shaped(get_aval(x)) for x in args] jaxpr, consts, out_tree = make_jaxpr(f, *avals_in) outs = bind(xla_call_p, *consts, *args, jaxpr=jaxpr, num_consts=len(consts)) return tree_unflatten(out_tree, outs) return f_jitted xla_call_p = Primitive('xla_call') # - # With any new primitive, we need to give it transformation rules, starting with # its evaluation rule. When we evaluate an application of the `xla_call` # primitive, we want to stage out out the computation to XLA. That involves # translating the jaxpr to an XLA HLO program, transferring the argument values # to the XLA device, executing the XLA program, and transferring back the # results. We'll cache the XLA HLO compilation so that for each `jit`ted # function it only needs to be performed once per argument shape and dtype # signature. # # First, some utilities. 
class IDHashable:
  # Wrapper giving identity-based hash/equality so otherwise-unhashable
  # objects (jaxprs' constants, ndarrays) can be used as lru_cache keys.
  val: Any

  def __init__(self, val):
    self.val = val

  def __hash__(self) -> int:
    return id(self.val)

  def __eq__(self, other):
    return type(other) is IDHashable and id(self.val) == id(other.val)

# Next, we'll define the evaluation rule for `xla_call`:

# +
from jax.lib import xla_bridge as xb
from jax.lib import xla_client as xc
xe = xc._xla
xops = xc._xla.ops

def xla_call_impl(*args, jaxpr: Jaxpr, num_consts: int):
  # The first `num_consts` positional args are the jaxpr's hoisted constants.
  consts, args = args[:num_consts], args[num_consts:]
  hashable_consts = tuple(map(IDHashable, consts))
  execute = xla_callable(IDHashable(jaxpr), hashable_consts)
  return execute(*args)
impl_rules[xla_call_p] = xla_call_impl

@lru_cache()
def xla_callable(hashable_jaxpr: IDHashable, hashable_consts: Tuple[IDHashable]):
  # Compile a jaxpr to an XLA executable; cached by jaxpr/const identity.
  jaxpr: Jaxpr = hashable_jaxpr.val
  consts = [x.val for x in hashable_consts]
  # Leading binders correspond to consts; the rest are the real parameters.
  in_avals = [v.aval for v in jaxpr.in_binders[len(consts):]]
  c = xb.make_computation_builder('xla_call')
  xla_consts = _xla_consts(c, consts)
  xla_params = _xla_params(c, in_avals)
  outs = jaxpr_subcomp(c, jaxpr, xla_consts + xla_params)
  out = xops.Tuple(c, outs)
  compiled = xb.get_backend(None).compile(c.build(out))
  return partial(execute_compiled, compiled, [v.aval for v in jaxpr.outs])

def _xla_consts(c: xe.XlaBuilder, consts: List[Any]) -> List[xe.XlaOp]:
  # Embed each distinct constant once, then map repeats to the same op.
  unique_consts = {id(cnst): cnst for cnst in consts}
  xla_consts = {
      id_: xops.ConstantLiteral(c, cnst) for id_, cnst in unique_consts.items()}
  return [xla_consts[id(cnst)] for cnst in consts]

def _xla_params(c: xe.XlaBuilder, avals_in: List[ShapedArray]) -> List[xe.XlaOp]:
  return [xb.parameter(c, i, _xla_shape(a)) for i, a in enumerate(avals_in)]

def _xla_shape(aval: ShapedArray) -> xe.Shape:
  return xc.Shape.array_shape(xc.dtype_to_etype(aval.dtype), aval.shape)
# -

# The main action is in `xla_callable`, which compiles a jaxpr into an XLA HLO
# program using `jaxpr_subcomp`, then returns a callable which executes the
# compiled program:

# +
def jaxpr_subcomp(c: xe.XlaBuilder, jaxpr: Jaxpr, args: List[xe.XlaOp]
                  ) -> xe.XlaOp:
  # A jaxpr interpreter whose "values" are XlaOps: evaluating the jaxpr
  # builds the HLO computation in `c`. (`map` is the file's strict map.)
  env: Dict[Var, xe.XlaOp] = {}

  def read(x: Atom) -> xe.XlaOp:
    return env[x] if type(x) is Var else xb.constant(c, x.val)

  def write(v: Var, val: xe.XlaOp) -> None:
    env[v] = val

  map(write, jaxpr.in_binders, args)
  for eqn in jaxpr.eqns:
    in_avals = [x.aval for x in eqn.inputs]
    in_vals = map(read, eqn.inputs)
    rule = xla_translations[eqn.primitive]
    out_vals = rule(c, in_avals, in_vals, **eqn.params)
    map(write, eqn.out_binders, out_vals)
  return map(read, jaxpr.outs)

def execute_compiled(compiled, out_avals, *args):
  # Transfer inputs to the device, run, and wrap the output buffers.
  input_bufs = [input_handlers[type(x)](x) for x in args]
  out_bufs = compiled.execute(input_bufs)
  return [handle_result(aval, buf) for aval, buf in zip(out_avals, out_bufs)]

default_input_handler = xb.get_backend(None).buffer_from_pyval
input_handlers = {ty: default_input_handler for ty in
                  [int, float, np.ndarray, np.float64, np.float32]}

def handle_result(aval: ShapedArray, buf):
  del aval  # Unused for now.
  return buf.to_py()

xla_translations = {}
# -

# Notice that `jaxpr_subcomp` has the structure of a simple interpreter. That's
# a common pattern: the way we process jaxprs is usually with an interpreter.
# And as with any interpreter, we need an interpretation rule for each # primitive: # + def direct_translation(op, c, in_avals, in_vals): del c, in_avals return [op(*in_vals)] xla_translations[add_p] = partial(direct_translation, xops.Add) xla_translations[mul_p] = partial(direct_translation, xops.Mul) xla_translations[neg_p] = partial(direct_translation, xops.Neg) xla_translations[sin_p] = partial(direct_translation, xops.Sin) xla_translations[cos_p] = partial(direct_translation, xops.Cos) xla_translations[greater_p] = partial(direct_translation, xops.Gt) def reduce_sum_translation(c, in_avals, in_vals, *, axis): (x_aval,), (x,) = in_avals, in_vals zero = xops.ConstantLiteral(c, np.array(0, x_aval.dtype)) subc = xb.make_computation_builder('add') shape = _xla_shape(ShapedArray((), x_aval.dtype)) xops.Add(xops.Parameter(subc, 0, shape), xops.Parameter(subc, 1, shape)) return [xops.Reduce(c, [x], [zero], subc.build(), [axis])] xla_translations[reduce_sum_p] = reduce_sum_translation def broadcast_translation(c, in_avals, in_vals, *, shape, axes): x, = in_vals dims_complement = [i for i in range(len(shape)) if i not in axes] return [xops.BroadcastInDim(x, shape, dims_complement)] xla_translations[broadcast_p] = broadcast_translation # - # With that, we can now use `jit` to stage out, compile, and execute programs # with XLA! @jit def f(x, y): print('tracing!') return sin(x) * cos(y) z = f(3., 4.) # 'tracing!' prints the first time print(z) z = f(4., 5.) # 'tracing!' doesn't print, compilation cache hit! print(z) # + @jit def f(x): return reduce_sum(x, axis=0) print(f(np.array([1., 2., 3.]))) # + def f(x): y = sin(x) * 2. z = - y + x return z def deriv(f): return lambda x: jvp(f, (x,), (1.,))[1] print( deriv(deriv(f))(3.)) print(jit(deriv(deriv(f)))(3.)) # - # Instead of implementing `jit` to first trace to a jaxpr and then to lower the # jaxpr to XLA HLO, it might appear that we could have skipped the jaxpr step # and just lowered to HLO while tracing. 
That is, perhaps we could have instead # implemented `jit` with a `Trace` and `Tracer` that appended to the XLA HLO # graph incrementally on each primitive bind. That's correct for now, but won't # be possible when we introduce compiled SPMD computations because there we must # know the number of replicas needed before compiling the program. # We haven't yet defined any transformation rules for `xla_call_p` other than # its evaluation rule. That is, we can't yet do `vmap`-of-`jit` or # `jvp`-of-`jit` or even `jit`-of`-jit`. Instead `jit` has to be at the "top # level." Let's fix that! # + def xla_call_jvp_rule(primals, tangents, *, jaxpr, num_consts): del num_consts # Unused. new_jaxpr, new_consts = jvp_jaxpr(jaxpr) outs = bind(xla_call_p, *new_consts, *primals, *tangents, jaxpr=new_jaxpr, num_consts=len(new_consts)) n = len(outs) // 2 primals_out, tangents_out = outs[:n], outs[n:] return primals_out, tangents_out jvp_rules[xla_call_p] = xla_call_jvp_rule @lru_cache() def jvp_jaxpr(jaxpr: Jaxpr) -> Tuple[Jaxpr, List[Any]]: def jvp_traceable(*primals_and_tangents): n = len(primals_and_tangents) // 2 primals, tangents = primals_and_tangents[:n], primals_and_tangents[n:] return jvp(jaxpr_as_fun(jaxpr), primals, tangents) in_avals = [v.aval for v in jaxpr.in_binders] new_jaxpr, new_consts, _ = make_jaxpr(jvp_traceable, *in_avals, *in_avals) return new_jaxpr, new_consts # + def xla_call_vmap_rule(axis_size, vals_in, dims_in, *, jaxpr, num_consts): del num_consts # Unused. new_jaxpr, new_consts = vmap_jaxpr(jaxpr, axis_size, tuple(dims_in)) outs = bind(xla_call_p, *new_consts, *vals_in, jaxpr=new_jaxpr, num_consts=len(new_consts)) return outs, [0] * len(outs) vmap_rules[xla_call_p] = xla_call_vmap_rule @lru_cache() def vmap_jaxpr(jaxpr: Jaxpr, axis_size: int, bdims_in: Tuple[BatchAxis, ...] 
) -> Tuple[Jaxpr, List[Any]]: vmap_traceable = vmap(jaxpr_as_fun(jaxpr), tuple(bdims_in)) in_avals = [unmapped_aval(axis_size, d, v.aval) for v, d in zip(jaxpr.in_binders, bdims_in)] new_jaxpr, new_consts, _ = make_jaxpr(vmap_traceable, *in_avals) return new_jaxpr, new_consts def unmapped_aval(axis_size: int, batch_dim: BatchAxis, aval: ShapedArray ) -> ShapedArray: if batch_dim is not_mapped: return aval else: shape = list(aval.shape) shape.insert(batch_dim, axis_size) return ShapedArray(tuple(shape), aval.dtype) # + def xla_call_abstract_eval_rule(*in_types, jaxpr, num_consts): del num_consts # Unused. jaxpr_type = typecheck_jaxpr(jaxpr) if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)): raise TypeError return jaxpr_type.out_types abstract_eval_rules[xla_call_p] = xla_call_abstract_eval_rule def xla_call_translation(c, in_avals, in_vals, *, jaxpr, num_consts): del num_consts # Only used at top-level. # Calling jaxpr_subcomp directly would inline. We generate a Call HLO instead. subc = xb.make_computation_builder('inner xla_call') xla_params = _xla_params(subc, in_avals) outs = jaxpr_subcomp(subc, jaxpr, xla_params) subc = subc.build(xops.Tuple(subc, outs)) return destructure_tuple(c, xops.Call(c, subc, in_vals)) xla_translations[xla_call_p] = xla_call_translation def destructure_tuple(c, tup): num_elements = len(c.get_shape(tup).tuple_shapes()) return [xops.GetTupleElement(tup, i) for i in range(num_elements)] # + @jit def f(x): print('tracing!') y = sin(x) * 2. z = - y + x return z x, xdot = 3., 1. y, ydot = jvp(f, (x,), (xdot,)) print(y) print(ydot) # - y, ydot = jvp(f, (x,), (xdot,)) # 'tracing!' not printed ys = vmap(f, (0,))(np.arange(3.)) print(ys) # One piece missing is device memory persistence for arrays. That is, we've # defined `handle_result` to transfer results back to CPU memory as NumPy # arrays, but it's often preferable to avoid transferring results just to # transfer them back for the next operation. 
# We can do that by introducing a
# `DeviceArray` class, which can wrap XLA buffers and otherwise duck-type
# `numpy.ndarray`s:

# +
def handle_result(aval: ShapedArray, buf):  # noqa: F811
  # Keep results on the device: wrap the XLA buffer rather than eagerly
  # copying it back to host memory as a numpy array.
  return DeviceArray(aval, buf)

class DeviceArray:
  """Wrapper around an XLA device buffer that duck-types `numpy.ndarray`."""
  buf: Any           # the underlying XLA buffer
  aval: ShapedArray  # abstract value recording shape and dtype

  def __init__(self, aval, buf):
    self.aval = aval
    self.buf = buf

  # ndarray-like metadata, read off the aval without touching the buffer.
  dtype = property(lambda self: self.aval.dtype)
  shape = property(lambda self: self.aval.shape)
  ndim = property(lambda self: self.aval.ndim)

  # Conversion/printing materializes the buffer as a NumPy array via to_py().
  def __array__(self): return self.buf.to_py()
  def __repr__(self): return repr(self.buf.to_py())
  def __str__(self): return str(self.buf.to_py())

  # Operator hooks (-x, x + y, x * y, x > y, and reflected forms).
  # NOTE(review): presumably dispatched by arithmetic dunders defined on a
  # shared base elsewhere in the file — confirm.
  _neg = staticmethod(neg)
  _add = staticmethod(add)
  _radd = staticmethod(add)
  _mul = staticmethod(mul)
  _rmul = staticmethod(mul)
  _gt = staticmethod(greater)
input_handlers[DeviceArray] = lambda x: x.buf

jax_types.add(DeviceArray)


# +
@jit
def f(x):
  y = sin(x) * 2.
  z = - y + x
  return z

x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -

# ## Part 4: `linearize` and `vjp` (and `grad`!)
#
# The `linearize` and `vjp` autodiff functions are built on `jvp`, but involve
# jaxprs as well. That's because both involve staging out, or delaying,
# computation.

# ### `linearize`
#
# In the case of `linearize`, we want to stage out the linear part of a `jvp`
# computation. That is, if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`,
# then we write `linearize : (a -> b) -> a -> (b, T a -o T b)`, using `T a` to
# mean "the tangent type of `a`" and using the "lollipop" `-o` rather than the
# arrow `->` to indicate a _linear_ function. We define the semantics of
# `linearize` in terms of `jvp` too:
# ```python
# y, f_lin = linearize(f, x)
# y_dot = f_lin(x_dot)
# ```
# gives the same result for `(y, y_dot)` as
# ```
# y, y_dot = jvp(f, (x,), (x_dot,))
# ```
# where the application of `f_lin` does not redo any of the linearization work.
# We'll represent the delayed linear part `f_lin : T a -o T b` as a jaxpr.
# # To build the `f_lin` jaxpr from a JVP, we need to perform partial evaluation: # we evaluate all the primal values as we trace, but stage the tangent # computations into a jaxpr. This is our second way to build jaxprs. But where # `make_jaxpr` and its underlying `JaxprTrace`/`JaxprTracer` interpreters aim # to stage out every primitive bind, this second approach stages out only those # primitive binds with a data dependence on tangent inputs. # # First, some utilities: # + def split_list(lst: List[Any], n: int) -> Tuple[List[Any], List[Any]]: return lst[:n], lst[n:] def split_half(lst: List[Any]) -> Tuple[List[Any], List[Any]]: assert not len(lst) % 2 return split_list(lst, len(lst) // 2) def partition_list(bs: List[bool], l: List[Any]) -> Tuple[List[Any], List[Any]]: lists = lst1, lst2 = [], [] for b, x in zip(bs, l): lists[b].append(x) return lst1, lst2 # - # Next, we'll write `linearize` by combining `jvp` together with a general # partial evaluation transformation, to be added next: # + def linearize_flat(f, *primals_in): pvals_in = ([PartialVal.known(x) for x in primals_in] + [PartialVal.unknown(vspace(get_aval(x))) for x in primals_in]) def f_jvp(*primals_tangents_in): primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in)) return [*primals_out, *tangents_out] jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) primal_pvals, _ = split_half(pvals_out) assert all(pval.is_known for pval in primal_pvals) primals_out = [pval.const for pval in primal_pvals] f_lin = lambda *tangents: eval_jaxpr(jaxpr, [*consts, *tangents]) return primals_out, f_lin def linearize(f, *primals_in): primals_in_flat, in_tree = tree_flatten(primals_in) f, out_tree = flatten_fun(f, in_tree) primals_out_flat, f_lin_flat = linearize_flat(f, *primals_in_flat) primals_out = tree_unflatten(out_tree(), primals_out_flat) def f_lin(*tangents_in): tangents_in_flat, in_tree2 = tree_flatten(tangents_in) if in_tree != in_tree2: raise TypeError tangents_out_flat = 
f_lin_flat(*tangents_in_flat) return tree_unflatten(out_tree(), tangents_out_flat) return primals_out, f_lin def vspace(aval: ShapedArray) -> ShapedArray: return raise_to_shaped(aval) # TODO handle integers? # - # Now we turn to the general partial evaluation transformation. The goal is to # accept a Python callable and a list of inputs, some known and some unknown, # and to produce (1) all the outputs which can be computed from the known # inputs, together with (2) a jaxpr representing the part of the Python # callable's computation which can only be performed after the remaining inputs # are known. # # This transformation can't be summarized purely in a type signature because its # behavior relies on the data dependencies inside the given Python callable and # not just its type. Nevertheless a heuristic type signature is useful. If we # assume the input function's type signature is `(a1, a2) -> (b1, b2)`, where # `a1` and `a2` represent the known and unknown inputs, respectively, and where # `b1` only has a data dependency on `a1` while `b2` has some data dependency on # `a2`, then we might write # # ``` # partial_eval : ((a1, a2) -> (b1, b2)) -> a1 -> (b1, res, (res, a2) -> b2) # ``` # # In words, given values for the inputs of type `a1`, `partial_eval` produces # the outputs of type `b1` along with "residual" values of type `res` # representing the intermediates required to complete the computation in the # second stage. It also produces a function of type `(res, a2) -> b2` which # accepts the residual values as well as the remaining inputs and produces the # remaining outputs. # # We like to think of partial evaluation as "unzipping" one computation into # two. For example, consider this jaxpr: # ``` # { lambda a:float64[] . # let b:float64[] = sin a # c:float64[] = neg b # in ( c ) } # ``` # A jaxpr for the JVP would look like: # ``` # { lambda a:float64[] b:float64 . 
# let c:float64[] = sin a # d:float64[] = cos a # e:float64[] = mul d b # f:float64[] = neg c # g:float64[] = neg e # in ( f, g ) } # ``` # If we imagine applying partial evaluation to this jaxpr with the first input # known and the second unknown, we end up 'unzipping' the JVP jaxpr into primal # and tangent jaxprs: # ``` # { lambda a:float64[] . # let c:float64[] = sin a # d:float64[] = cos a # f:float64[] = neg c # in ( f, d ) } # ``` # ``` # { lambda d:float64[] b:float64[] . # let e:float64[] = mul d b # g:float64[] = neg e # in ( g ) } # ``` # This second jaxpr is represents the linear computation that we want from # `linearize`. # # However, unlike in this jaxpr example, we want the computation on known values # to occur while evaluating the input Python callable. That is, rather than # forming a jaxpr for the entire function `(a1, a2) -> (b1, b2)`, staging all # operations out of Python first before sorting out what can be evaluated now # and what must be delayed, we want only to form a jaxpr for those operations # that _must_ be delayed due to a dependence on unknown inputs. In the context # of automatic differentiation, this is the feature ultimately enables us to # handle functions like `grad(lambda x: x**2 if x > 0 else 0.)`. Python control # flow works because partial evaluation keeps the primal computation in Python. # As a consequence, our `Trace` and `Tracer` subclasses must on the fly sort out # what can be evaluated and what must be staged out into a jaxpr. 
# # First, we start with a `PartialVal` class, which represents a value that can # be either known or unknown: class PartialVal(NamedTuple): aval: ShapedArray const: Optional[Any] @classmethod def known(cls, val: Any): return PartialVal(get_aval(val), val) @classmethod def unknown(cls, aval: ShapedArray): return PartialVal(aval, None) is_known = property(lambda self: self.const is not None) is_unknown = property(lambda self: self.const is None) # Partial evaluation will take a list of `PartialVal`s representing inputs, and # return a list of `PartialVal` outputs along with a jaxpr representing the # delayed computation: def partial_eval_flat(f, pvals_in: List[PartialVal]): with new_main(PartialEvalTrace) as main: trace = PartialEvalTrace(main) tracers_in = [trace.new_arg(pval) for pval in pvals_in] outs = f(*tracers_in) tracers_out = [full_raise(trace, out) for out in outs] jaxpr, consts = tracers_to_jaxpr(tracers_in, tracers_out) pvals_out = [t.pval for t in tracers_out] return jaxpr, pvals_out, consts # Next we need to implement `PartialEvalTrace` and its `PartialEvalTracer`. This # interpreter will build a jaxpr on the fly while tracking data dependencies. To # do so, it builds a bipartite directed acyclic graph (DAG) between # `PartialEvalTracer` nodes, representing staged-out values, and `JaxprRecipe` # nodes, representing formulas for how to compute some values from others. 
One # kind of recipe is a `JaxprEqnRecipe`, corresponding to a `JaxprEqn`'s primitive # application, but we also have recipe types for constants and lambda binders: # + from weakref import ref, ReferenceType class LambdaBindingRecipe(NamedTuple): pass class ConstRecipe(NamedTuple): val: Any class JaxprEqnRecipe: prim: Primitive tracers_in: List['PartialEvalTracer'] params: Dict[str, Any] avals_out: List[ShapedArray] tracer_refs_out: List['ReferenceType[PartialEvalTracer]'] def __init__(self, prim, tracers_in, params, avals_out, tracer_refs_out): self.prim = prim self.tracers_in = tracers_in self.params = params self.avals_out = avals_out self.tracer_refs_out = tracer_refs_out JaxprRecipe = Union[LambdaBindingRecipe, ConstRecipe, JaxprEqnRecipe] # - class PartialEvalTracer(Tracer): pval: PartialVal recipe: JaxprRecipe def __init__(self, trace, pval, recipe): self._trace = trace self.pval = pval self.recipe = recipe @property def aval(self): return self.pval.aval def full_lower(self): if self.pval.is_known: return full_lower(self.pval.const) return self # The `PartialEvalTrace` contains the logic for constructing the graph of # `JaxprRecipe`s and `PartialEvalTracer`s. Each argument corresponds to a # `LambdaBindingRecipe` leaf node, and each constant is a `ConstRecipe` leaf # node holding a reference to the constant. All other tracers and recipes come # from `process_primitive`, which forms tracers with `JaxprEqnRecipe`s. # # For most primitives, the `process_primitive` logic is straightforward: if all # inputs are known then we can bind the primitive on the known values # (evaluating it in Python) and avoid forming tracers corresponding to the # output. If instead any input is unknown then we instead stage out into a # `JaxprEqnRecipe` representing the primitive application. To build the tracers # representing unknown outputs, we need avals, which get from the abstract eval # rules. 
(Notice that tracers reference `JaxprEqnRecipe`s, and `JaxprEqnRecipe`s # reference tracers; we avoid circular garbage by using weakrefs.) # # That `process_primitive` logic applies to most primitives, but `xla_call_p` # requires recursive treatment. So we special-case its rule in a # `partial_eval_rules` dict. # + class PartialEvalTrace(Trace): def new_arg(self, pval: PartialVal) -> Any: return PartialEvalTracer(self, pval, LambdaBindingRecipe()) def lift(self, val: Any) -> PartialEvalTracer: return PartialEvalTracer(self, PartialVal.known(val), None) pure = lift def instantiate_const(self, tracer: PartialEvalTracer) -> PartialEvalTracer: if tracer.pval.is_unknown: return tracer else: pval = PartialVal.unknown(raise_to_shaped(tracer.aval)) return PartialEvalTracer(self, pval, ConstRecipe(tracer.pval.const)) def process_primitive(self, primitive, tracers, params): if all(t.pval.is_known for t in tracers): return bind(primitive, *map(full_lower, tracers), **params) rule = partial_eval_rules.get(primitive) if rule: return rule(self, tracers, **params) tracers_in = [self.instantiate_const(t) for t in tracers] avals_in = [t.aval for t in tracers_in] avals_out = abstract_eval_rules[primitive](*avals_in, **params) tracers_out = [PartialEvalTracer(self, PartialVal.unknown(aval), None) for aval in avals_out] eqn = JaxprEqnRecipe(primitive, tracers_in, params, avals_out, map(ref, tracers_out)) for t in tracers_out: t.recipe = eqn return tracers_out partial_eval_rules = {} # - # Now that we can build graph representations of jaxprs with `PartialEvalTrace`, # we need a mechanism to convert the graph representation to a standard jaxpr. # The jaxpr corresponds to a topological sort of the graph. 
# + def tracers_to_jaxpr(tracers_in: List[PartialEvalTracer], tracers_out: List[PartialEvalTracer]): tracers_in = [t for t in tracers_in if t.pval.is_unknown] tracers_out = [t for t in tracers_out if t.pval.is_unknown] tracer_to_var = {id(t): Var(raise_to_shaped(t.aval)) for t in tracers_in} constvar_to_val = {} constid_to_var = {} processed_eqns = set() eqns = [] for t in toposort(tracers_out, tracer_parents): if isinstance(t.recipe, LambdaBindingRecipe): assert id(t) in set(map(id, tracers_in)) elif isinstance(t.recipe, ConstRecipe): val = t.recipe.val var = constid_to_var.get(id(val)) if var is None: aval = raise_to_shaped(get_aval(val)) var = tracer_to_var[id(t)] = constid_to_var[id(val)] = Var(aval) constvar_to_val[var] = val elif isinstance(t.recipe, JaxprEqnRecipe): if id(t.recipe) not in processed_eqns: eqns.append(recipe_to_eqn(tracer_to_var, t.recipe)) processed_eqns.add(id(t.recipe)) else: raise TypeError(t.recipe) constvars, constvals = unzip2(constvar_to_val.items()) in_binders = constvars + [tracer_to_var[id(t)] for t in tracers_in] out_vars = [tracer_to_var[id(t)] for t in tracers_out] jaxpr = Jaxpr(in_binders, eqns, out_vars) typecheck_jaxpr(jaxpr) return jaxpr, constvals def recipe_to_eqn(tracer_to_var: Dict[int, Var], recipe: JaxprEqnRecipe ) -> JaxprEqn: inputs = [tracer_to_var[id(t)] for t in recipe.tracers_in] out_binders = [Var(aval) for aval in recipe.avals_out] for t_ref, var in zip(recipe.tracer_refs_out, out_binders): if t_ref() is not None: tracer_to_var[id(t_ref())] = var return JaxprEqn(recipe.prim, inputs, recipe.params, out_binders) def tracer_parents(t: PartialEvalTracer) -> List[PartialEvalTracer]: return t.recipe.tracers_in if isinstance(t.recipe, JaxprEqnRecipe) else [] # + def toposort(out_nodes: List[Any], parents: Callable[[Any], List[Any]]): if not out_nodes: return [] out_nodes = remove_duplicates(out_nodes) child_counts = {} stack = list(out_nodes) while stack: node = stack.pop() if id(node) in child_counts: 
child_counts[id(node)] += 1 else: child_counts[id(node)] = 1 stack.extend(parents(node)) for node in out_nodes: child_counts[id(node)] -= 1 sorted_nodes = [] childless_nodes = [node for node in out_nodes if not child_counts[id(node)]] while childless_nodes: node = childless_nodes.pop() sorted_nodes.append(node) for parent in parents(node): if child_counts[id(parent)] == 1: childless_nodes.append(parent) else: child_counts[id(parent)] -= 1 sorted_nodes = sorted_nodes[::-1] check_toposort(sorted_nodes, parents) return sorted_nodes def remove_duplicates(lst): seen = set() return [x for x in lst if id(x) not in seen and not seen.add(id(x))] def check_toposort(nodes: List[Any], parents: Callable[[Any], List[Any]]): seen = set() for node in nodes: assert all(id(parent) in seen for parent in parents(node)) seen.add(id(node)) # - # Now we can linearize! y, sin_lin = linearize(sin, 3.) print(y, sin(3.)) print(sin_lin(1.), cos(3.)) # To handle `linearize`-of-`jit`, we still need to write a partial evaluation # rule for `xla_call_p`. Other than tracer bookkeeping, the main task is to # perform partial evaluation of a jaxpr, 'unzipping' it into two jaxprs. # + def xla_call_partial_eval(trace, tracers, *, jaxpr, num_consts): del num_consts # Unused. 
in_unknowns = [not t.pval.is_known for t in tracers] jaxpr1, jaxpr2, out_unknowns, num_res = partial_eval_jaxpr(jaxpr, in_unknowns) known_tracers, unknown_tracers = partition_list(in_unknowns, tracers) known_vals = [t.pval.const for t in known_tracers] outs1_res = bind(xla_call_p, *known_vals, jaxpr=jaxpr1, num_consts=0) outs1, res = split_list(outs1_res, len(jaxpr1.outs) - num_res) res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res] outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None) for v in jaxpr2.outs] eqn = JaxprEqnRecipe(xla_call_p, res_tracers + unknown_tracers, dict(jaxpr=jaxpr2, num_consts=0), [v.aval for v in jaxpr2.outs], map(ref, outs2)) for t in outs2: t.recipe = eqn outs1, outs2 = iter(outs1), iter(outs2) return [next(outs2) if uk else next(outs1) for uk in out_unknowns] partial_eval_rules[xla_call_p] = xla_call_partial_eval def partial_eval_jaxpr(jaxpr: Jaxpr, in_unknowns: List[bool] ) -> Tuple[Jaxpr, Jaxpr, List[bool], int]: env: Dict[Var, bool] = {} residuals = set() def read(v: Atom) -> bool: if type(v) is Lit: raise NotImplementedError return env[v] def write(unk: bool, v: Var) -> None: env[v] = unk def new_res(v: Var) -> Var: return residuals.add(v) or v eqns1, eqns2 = [], [] map(write, in_unknowns, jaxpr.in_binders) for eqn in jaxpr.eqns: unks_in = map(read, eqn.inputs) rule = partial_eval_jaxpr_rules.get(eqn.primitive) if rule: eqn1, eqn2, unks_out, res = rule(unks_in, eqn) eqns1.append(eqn1); eqns2.append(eqn2); residuals.update(res) map(write, unks_out, eqn.out_binders) elif any(unks_in): inputs = [v if unk else new_res(v) for unk, v in zip(unks_in, eqn.inputs)] eqns2.append(JaxprEqn(eqn.primitive, inputs, eqn.params, eqn.out_binders)) map(partial(write, True), eqn.out_binders) else: eqns1.append(eqn) map(partial(write, False), eqn.out_binders) out_unknowns = map(read, jaxpr.outs) residuals, num_res = list(residuals), len(residuals) ins1, ins2 = partition_list(in_unknowns, jaxpr.in_binders) outs1, outs2 
= partition_list(out_unknowns, jaxpr.outs) jaxpr1 = Jaxpr(ins1, eqns1, outs1 + residuals) jaxpr2 = Jaxpr(residuals + ins2, eqns2, outs2) typecheck_partial_eval_jaxpr(jaxpr, in_unknowns, out_unknowns, jaxpr1, jaxpr2) return jaxpr1, jaxpr2, out_unknowns, num_res def typecheck_partial_eval_jaxpr(jaxpr, unks_in, unks_out, jaxpr1, jaxpr2): jaxprty = typecheck_jaxpr(jaxpr) # (a1, a2) -> (b1, b2 ) jaxpr1ty = typecheck_jaxpr(jaxpr1) # a1 -> (b1, res) jaxpr2ty = typecheck_jaxpr(jaxpr2) # (res, a2) -> b2 a1, a2 = partition_list(unks_in, jaxprty.in_types) b1, b2 = partition_list(unks_out, jaxprty.out_types) b1_, res = split_list(jaxpr1ty.out_types, len(b1)) res_, a2_ = split_list(jaxpr2ty.in_types, len(res)) b2_ = jaxpr2ty.out_types if jaxpr1ty.in_types != a1: raise TypeError if jaxpr2ty.out_types != b2: raise TypeError if b1 != b1_: raise TypeError if res != res_: raise TypeError if a2 != a2_: raise TypeError if b2 != b2_: raise TypeError partial_eval_jaxpr_rules = {} def xla_call_peval_eqn(unks_in: List[bool], eqn: JaxprEqn ) -> Tuple[JaxprEqn, JaxprEqn, List[bool], List[Atom]]: jaxpr = eqn.params['jaxpr'] jaxpr1, jaxpr2, unks_out, num_res = partial_eval_jaxpr(jaxpr, unks_in) ins1, ins2 = partition_list(unks_in, eqn.inputs) outs1, outs2 = partition_list(unks_out, eqn.out_binders) residuals, _ = split_list(jaxpr2.in_binders, num_res) eqn1 = JaxprEqn(xla_call_p, ins1, dict(jaxpr=jaxpr1, num_consts=0), outs1 + residuals) eqn2 = JaxprEqn(xla_call_p, residuals + ins2, dict(jaxpr=jaxpr2, num_consts=0), outs2) return eqn1, eqn2, unks_out, residuals partial_eval_jaxpr_rules[xla_call_p] = xla_call_peval_eqn # - # With that, we can compose `linearize` and `jit` however we like: # + @jit def f(x): y = sin(x) * 2. z = - y + x return z y, f_lin = linearize(f, 3.) y_dot = f_lin(1.) print(y, y_dot) # + @jit def f(x): y = sin(x) * 2. z = g(x, y) return z @jit def g(x, y): return cos(x) + y y, f_lin = linearize(f, 3.) y_dot = f_lin(1.) 
print(y, y_dot) # - # ### `vjp` and `grad` # # The `vjp` transformation works a lot like linearize. Its type signature is # analogous: # # ``` # linearize : (a -> b) -> a -> (b, T a -o T b) # vjp : (a -> b) -> a -> (b, T b -o T a) # ``` # # The only difference is that we transpose the linear part of the computation # before returning it, so that it goes from type `T a -o T b` to type `T b -o T # a`. That is, we'll implement `vjp` as, essentially, # # ``` # def vjp(f, x): # y, f_lin = linearize(f, x) # f_vjp = lambda y_bar: transpose(f_lin)(y_bar) # return y, f_vjp # ``` # # Since we have the linear computation as a jaxpr, not just a Python callable, # we can implement the transpose transformation as a jaxpr interpreter. # + def vjp_flat(f, *primals_in): pvals_in = ([PartialVal.known(x) for x in primals_in] + [PartialVal.unknown(vspace(get_aval(x))) for x in primals_in]) primal_pvals_in, tangent_pvals_in = split_half(pvals_in) def f_jvp(*primals_tangents_in): primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in)) return [*primals_out, *tangents_out] jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) # linearize primal_pvals, _ = split_half(pvals_out) assert all(pval.is_known for pval in primal_pvals) primals_out = [pval.const for pval in primal_pvals] transpose_inputs = consts + [UndefPrimal(p.aval) for p in tangent_pvals_in] f_vjp = lambda *cts: eval_jaxpr_transposed(jaxpr, transpose_inputs, cts) return primals_out, f_vjp def vjp(f, *primals_in): primals_in_flat, in_tree = tree_flatten(primals_in) f, out_tree = flatten_fun(f, in_tree) primals_out_flat, f_vjp_flat = vjp_flat(f, *primals_in_flat) primals_out = tree_unflatten(out_tree(), primals_out_flat) def f_vjp(*cotangents_out): cotangents_out_flat, _ = tree_flatten(cotangents_out) cotangents_in_flat = f_vjp_flat(*cotangents_out_flat) return tree_unflatten(in_tree, cotangents_in_flat) return primals_out, f_vjp class UndefPrimal(NamedTuple): aval: ShapedArray 
register_pytree_node(UndefPrimal, lambda u: (u.aval, ()), lambda aval, _: UndefPrimal(aval)) # - # We use `UndefPrimal` instances to indicate which arguments with respect to # with we want to transpose. These arise because in general, being explicit # about closed-over values, we want to transpose functions of type # `a -> b -o c` to functions of type `a -> c -o b`. Even more generally, the # inputs with respect to which the function is linear could be scattered through # the argument list. So we indicate the linear positions using `UndefPrimal`. # We register `UndefPrimal` as a pytree node because the pytree mechanism gives # a handy way to prune these placeholders out of argument lists. # # Next, we can write `eval_jaxpr_transposed`, along with transpose rules for # all primitives which can be linear in at least one argument: # + # NB: the analogous function in JAX is called 'backward_pass' def eval_jaxpr_transposed(jaxpr: Jaxpr, args: List[Any], cotangents: List[Any] ) -> List[Any]: primal_env: Dict[Var, Any] = {} ct_env: Dict[Var, Any] = {} def read_primal(x: Atom) -> Any: return primal_env.get(x, UndefPrimal(x.aval)) if type(x) is Var else x.val def write_primal(v: Var, val: Any) -> None: if type(val) is not UndefPrimal: primal_env[v] = val def read_cotangent(v: Var) -> Any: return ct_env.pop(v, np.zeros(v.aval.shape, v.aval.dtype)) def write_cotangent(x: Atom, val: Any): if type(x) is Var and val is not None: ct_env[x] = add(ct_env[x], val) if x in ct_env else val map(write_primal, jaxpr.in_binders, args) map(write_cotangent, jaxpr.outs, cotangents) for eqn in jaxpr.eqns[::-1]: primals_in = map(read_primal, eqn.inputs) cts_in = map(read_cotangent, eqn.out_binders) rule = transpose_rules[eqn.primitive] cts_out = rule(cts_in, *primals_in, **eqn.params) map(write_cotangent, eqn.inputs, cts_out) return [read_cotangent(v) for v, x in zip(jaxpr.in_binders, args) if type(x) is UndefPrimal] transpose_rules = {} # + def mul_transpose_rule(cts, x, y): z_bar, = cts 
assert (type(x) is UndefPrimal) ^ (type(y) is UndefPrimal) return [mul(z_bar, y), None] if type(x) is UndefPrimal else [None, mul(x, z_bar)] transpose_rules[mul_p] = mul_transpose_rule def neg_transpose_rule(cts, x): ybar, = cts assert type(x) is UndefPrimal return [neg(ybar)] transpose_rules[neg_p] = neg_transpose_rule def add_transpose_rule(cts, x, y): z_bar, = cts return [z_bar, z_bar] transpose_rules[add_p] = add_transpose_rule def xla_call_transpose_rule(cts, *invals, jaxpr, num_consts): del num_consts # Unused. undef_primals = [type(x) is UndefPrimal for x in invals] transposed_jaxpr, new_consts = transpose_jaxpr(jaxpr, tuple(undef_primals)) residuals, _ = partition_list(undef_primals, invals) outs = bind(xla_call_p, *new_consts, *residuals, *cts, jaxpr=transposed_jaxpr, num_consts=len(new_consts)) outs = iter(outs) return [next(outs) if undef else None for undef in undef_primals] transpose_rules[xla_call_p] = xla_call_transpose_rule @lru_cache() def transpose_jaxpr(jaxpr: Jaxpr, undef_primals: Tuple[bool, ...] ) -> Tuple[Jaxpr, List[Any]]: traceable = partial(eval_jaxpr_transposed, jaxpr) avals_in, avals_out = typecheck_jaxpr(jaxpr) args = [UndefPrimal(a) if u else a for a, u in zip(avals_in, undef_primals)] trans_jaxpr, consts, _ = make_jaxpr(traceable, tuple(args), tuple(avals_out)) return trans_jaxpr, consts # - # Now that we can linearize and transpose, we can finally write `grad`: def grad(f): def gradfun(x, *xs): y, f_vjp = vjp(f, x, *xs) if np.shape(y) != (): raise TypeError x_bar, *_ = f_vjp(np.ones(np.shape(y), np.result_type(y))) return x_bar return gradfun y, f_vjp = vjp(sin, 3.) print(f_vjp(1.), cos(3.)) # + def f(x): y = sin(x) * 2. z = - y + x return z print(grad(f)(3.)) # + @jit def f(x): y = x * 2. z = g(y) return z @jit def g(x): return cos(x) * 2. 
print(grad(f)(3.)) # - # Here's something of a compositionality stress test: # + # from core_test.py fun_with_nested_calls_2 def foo(x): @jit def bar(y): def baz(w): q = jit(lambda x: y)(x) q = q + jit(lambda: y)() q = q + jit(lambda y: w + y)(y) q = jit(lambda w: jit(sin)(x) * y)(1.0) + q return q p, t = jvp(baz, (x + 1.0,), (y,)) return t + (x * p) return bar(x) def assert_allclose(*vals): for v1, v2 in zip(vals[:-1], vals[1:]): np.testing.assert_allclose(v1, v2) ans1 = f(3.) ans2 = jit(f)(3.) ans3, _ = jvp(f, (3.,), (5.,)) ans4, _ = jvp(jit(f), (3.,), (5.,)) assert_allclose(ans1, ans2, ans3, ans4) deriv1 = grad(f)(3.) deriv2 = grad(jit(f))(3.) deriv3 = jit(grad(jit(f)))(3.) _, deriv4 = jvp(f, (3.,), (1.,)) _, deriv5 = jvp(jit(f), (3.,), (1.,)) assert_allclose(deriv1, deriv2, deriv3, deriv4, deriv5) hess1 = grad(grad(f))(3.) hess2 = grad(grad(jit(f)))(3.) hess3 = grad(jit(grad(f)))(3.) hess4 = jit(grad(grad(f)))(3.) _, hess5 = jvp(grad(f), (3.,), (1.,)) _, hess6 = jvp(jit(grad(f)), (3.,), (1.,)) _, hess7 = jvp(jit(grad(f)), (3.,), (1.,)) assert_allclose(hess1, hess2, hess3, hess4, hess5, hess6, hess7)
docs/autodidax.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# **This notebook is an exercise in the [Introduction to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/dansbecker/underfitting-and-overfitting).**
#
# ---
#
# ## Recap
# You've built your first model, and now it's time to optimize the size of the tree to make better predictions. Run this cell to set up your coding environment where the previous step left off.

# +
# Code you have previously used to load data
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'

home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X: the feature columns used to predict sale price
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]

# Split into validation and training data (fixed seed for reproducibility)
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)

# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)

# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE: {:,.0f}".format(val_mae))

# Set up code checking (the step_N objects below grade each exercise step)
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex5 import *
print("\nSetup complete")
# -

# # Exercises
# You could write the function `get_mae` yourself. For now, we'll supply it. This is the same function you read about in the previous lesson. Just run the cell below.

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    # Train a decision tree capped at `max_leaf_nodes` leaves and return its
    # validation-set mean absolute error (lower is better). The fixed
    # random_state makes the comparison across sizes deterministic.
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(train_X, train_y)
    preds_val = model.predict(val_X)
    mae = mean_absolute_error(val_y, preds_val)
    return(mae)

# ## Step 1: Compare Different Tree Sizes
# Write a loop that tries the following values for *max_leaf_nodes* from a set of possible values.
#
# Call the *get_mae* function on each value of max_leaf_nodes. Store the output in some way that allows you to select the value of `max_leaf_nodes` that gives the most accurate model on your data.

# +
candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
# Write loop to find the ideal tree size from candidate_max_leaf_nodes
# Map each candidate tree size to its validation MAE.
mae_scores = {}
for size in candidate_max_leaf_nodes:
    mae_scores[size] = get_mae(size, train_X, val_X, train_y, val_y)

# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)
# min over the dict keys with key=mae_scores.get picks the size with lowest MAE.
best_tree_size = min(mae_scores, key=mae_scores.get)

step_1.check()

# +
# The lines below will show you a hint or the solution.
# step_1.hint()
# step_1.solution()
# -

# ## Step 2: Fit Model Using All Data
# You know the best tree size. If you were going to deploy this model in practice, you would make it even more accurate by using all of the data and keeping that tree size. That is, you don't need to hold out the validation data now that you've made all your modeling decisions.

# +
# Fill in argument to make optimal size and uncomment
final_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=1)

# fit the final model and uncomment the next two lines
# Refit on the full dataset (X, y) — no held-out validation at this stage.
final_model.fit(X, y)

step_2.check()

# +
# step_2.hint()
# step_2.solution()
# -

# You've tuned this model and improved your results. But we are still using Decision Tree models, which are not very sophisticated by modern machine learning standards. In the next step you will learn to use Random Forests to improve your models even more.
#
# # Keep Going
#
# You are ready for **[Random Forests](https://www.kaggle.com/dansbecker/random-forests).**
#
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161285) to chat with other Learners.*
machine learning/underfitting-and-overfitting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="0_8KTjorPZ0s" colab_type="code" colab={} # %matplotlib inline import numpy as np from scipy import ndimage import matplotlib.pyplot as plt # + id="Zmya2EpuM5L2" colab_type="code" colab={} def convolve(x, w, padding=True): newx = ndimage.convolve(x, np.flip(w), mode='constant') if padding: return newx else: return newx[1:-1,1:-1] # + id="tpzpf5pAVP6l" colab_type="code" colab={} def visualize(x): fig, ax = plt.subplots(figsize=x.shape) ax.matshow(x, cmap=plt.cm.Blues, alpha=0.3) for i in range(x.shape[0]): for j in range(x.shape[1]): ax.text(x=j, y=i, s=x[i, j], va='center', ha='center') ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) for i in np.linspace(0.5,x.shape[0]-1.5,x.shape[0]-1): ax.axhline(i, c='k', lw=1) for i in np.linspace(0.5,x.shape[1]-1.5,x.shape[1]-1): ax.axvline(i, c='k', lw=1) # + id="Ro_y0vRSVyu9" colab_type="code" colab={} x = np.random.randint(0, 3, (5, 5)) visualize(x) # + id="l5wNnDE-WUL1" colab_type="code" colab={} w = np.random.randint(0, 2, (3, 3)) visualize(w) # + id="VRljlRbESdfO" colab_type="code" colab={} newx = convolve(x, w, padding=False) # + id="3HIwxJ_YWc_x" colab_type="code" colab={} visualize(newx) # + id="46Dj2Y1ymX-H" colab_type="code" colab={} sobelx = np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]]) sobely = np.array([[1, 2, 1],[0, 0, 0],[-1, -2, -1]]) # + id="xmukNdvvmoUO" colab_type="code" colab={} visualize(sobely)
A-02 image convolution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Functions as first class objects

# ### First class functions are defined as:
# Functions that are treated as first-class objects are called first-class
# functions. A first-class object can be:
# * Created at runtime
# * Assigned to a variable or stored in a data structure
# * Passed as an argument to a function
# * Returned as the result of a function
#
# Integers, strings, and dictionaries are other examples of first-class
# *objects* (the original text mislabeled them "first-class functions").

def factorial(n):
    '''returns n!'''
    # Recursive definition; n < 1 (including 0) is the base case returning 1.
    return 1 if n < 1 else n * factorial(n - 1)

factorial(42)

factorial.__doc__

type(factorial)

# A function object can be bound to another name and called through it.
fact = factorial
fact(5)

# `map` accepts the function object itself as its first argument.
map(fact, range(5))

list(map(fact, range(5)))

# ### Higher Order Functions
# #### A function that takes a function as an argument, or returns a function
# as its result, is a higher-order function.
# **all(iterable)**: Returns True if every element of the iterable is truthy;
# all([]) returns True.
# **any(iterable)**: Returns True if any element of the iterable is truthy;
# any([]) returns False.

# ### Anonymous Functions
notebooks/3.Functions_and_Objects.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import matplotlib.pyplot as plt

# ## Election data
ballot_df = pd.read_csv("PyPoll/Resources/election_data.csv")
ballot_df.head()

# ## Dataframe and plots grouped by Candidate
grouped_by_candidate_ballot_df = ballot_df.groupby("Candidate").count()
# Drop the redundant count column; any remaining column carries the vote counts.
del grouped_by_candidate_ballot_df["County"]
grouped_by_candidate_ballot_df.head()

grouped_by_candidate_ballot_df.plot(kind="bar", y="Voter ID", title="Voting Outcome")
plt.ylabel("Number of Votes")

grouped_by_candidate_ballot_df.plot(kind="pie", x="Candidate", y="Voter ID",
                                    title="Vote Percentages")

# ## Dataframe and plots grouped by County
grouped_by_county_ballot_df = ballot_df.groupby("County").count()
del grouped_by_county_ballot_df["Candidate"]
grouped_by_county_ballot_df.head()

grouped_by_county_ballot_df.plot(kind="bar", y="Voter ID",
                                 title="County Vote Distribution")
plt.ylabel("Number of Votes")

grouped_by_county_and_candidate_ballot_df = (
    ballot_df.groupby(["County", "Candidate"]).count()
)
grouped_by_county_and_candidate_ballot_df.head(20)

# ### Individual County Plots
# +
Counties = ballot_df["County"].unique()
Counties

# One bar chart and one pie chart per county.
for county in Counties:
    grouped_by_county_and_candidate_ballot_df.loc[county, :].plot(
        kind="bar", title=f"{county} County Voting Outcome")
    plt.ylabel("Number of Votes")
    grouped_by_county_and_candidate_ballot_df.loc[county, :].plot(
        kind="pie", x="Candidate", y="Voter ID",
        title=f"{county} County Vote Percentages")
.ipynb_checkpoints/Election Plotting-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] colab_type="text" id="fTFj8ft5dlbS" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="lzyBOpYMdp3F" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + cellView="form" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="m_x4KfSJ7Vt7" #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] colab_type="text" id="C9HmC2T4ld5B" # # 探索过拟合与欠拟合 # + [markdown] colab_type="text" id="kRTxFhXAlnl1" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/overfit_and_underfit"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="19rPukKZsPG6" # 与往常一样,此示例中的代码将使用 tf.keras API,您可以在 TensorFlow [Keras 指南](https://www.tensorflow.org/guide/keras)中了解更多信息。 # # 在之前的两个例子中 — 电影评论文本分类和预测住房价格 — 我们看到模型在验证数据上的准确率在经过多个 epoch 训练后会达到峰值,然后开始下降。 # # 换句话说,我们的模型在训练数据上<b>过拟合</b>了。学习如何处理过度拟合很重要。虽然可以在<b>训练集</b>上实现很高准确率,但我们真正需要的是开发出在<b>测试数据</b>(或者从未出现过的数据)同样表现较好的模型。 # # 过拟合相反的情况是<b>欠拟合</b>。当在测试数据上仍有提升空间时,意味着模型欠拟合。出现这种情况的原因有很多:如果模型不够强大,过度正则化,或者根本没有经过足够长时间的训练。这意味着网络还没有学习到训练数据中的相关特征。 # # 如果训练时间过长,模型将开始过拟合并从训练数据中学习到一些在测试数据上不具有泛化能力的特征。我们需要权衡模型训练的时间。我们将在后面学习如何控制模型训练的 epoch 数量,这对我们来说是一个很重要的技能。 # # 为了防止过拟合,最好的解决方案是使用更多的训练数据。使用大量数据训练的模型泛化能力会更好。当数据量不足时,下一个最佳解决方案是使用正规化等技术。这些限制了模型可以存储信息的数量和类型。 如果一个网络只能记住少量的模式,那么优化过程将迫使它专注于最突出的模式,这些模式有更好的概括性。 # # 在这个笔记本中,我们将探索两种常见的正则化技术 — 权重正则化和丢失 — 并使用它们来改进我们的 IMDB 电影评论文本分类。 # + colab={"autoexec": {"startup": false, "wait_interval": 0}} 
import tensorflow as tf
from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)

# ## Download the IMDB dataset
# Instead of word embeddings, each review is multi-hot encoded into a fixed
# 10,000-dim 0/1 vector. This simple representation makes the model overfit the
# training set quickly, which is exactly what this notebook demonstrates.
# Encoding the sequence [3, 5] into a 10,000-dim vector means setting indices
# 3 and 5 to 1 and everything else to 0.

NUM_WORDS = 10000

(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)

def multi_hot_sequences(sequences, dimension):
    """Multi-hot encode: row i of the result has 1.0 at every word index in sequences[i]."""
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0  # set specific indices of results[i] to 1s
    return results

train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)

# Word indices are sorted by frequency, so 1-values cluster near index zero:
plt.plot(train_data[0])

# ## Demonstrate overfitting
# The simplest guard against overfitting is reducing model size — the number of
# learnable parameters ("capacity"). More capacity means more memorization: a
# perfect mapping on the training set that does not generalize. Too little
# capacity means underfitting. There is no formula for the right architecture;
# start with few layers/parameters and grow until validation loss stops
# improving. Below we build a baseline dense network and compare it against a
# smaller and a bigger variant on the IMDB task.

# ### Create the baseline model
# +
baseline_model = keras.Sequential([
    # `input_shape` is only required here so that `.summary` works.
    keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

baseline_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])

baseline_model.summary()

baseline_history = baseline_model.fit(train_data,
                                      train_labels,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(test_data, test_labels),
                                      verbose=2)

# ### Create a smaller model
# Fewer hidden units than the baseline we just built, for comparison:
smaller_model = keras.Sequential([
    keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(4, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

smaller_model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy', 'binary_crossentropy'])

smaller_model.summary()

# Train it on the same data:
smaller_history = smaller_model.fit(train_data,
                                    train_labels,
                                    epochs=20,
                                    batch_size=512,
                                    validation_data=(test_data, test_labels),
                                    verbose=2)

# ### Create a bigger model
# As an exercise, a network with far more capacity than the problem warrants,
# to see how quickly it starts overfitting:
bigger_model = keras.models.Sequential([
    keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(512, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

bigger_model.compile(optimizer='adam',
                     loss='binary_crossentropy',
                     metrics=['accuracy', 'binary_crossentropy'])

bigger_model.summary()

# Again trained on the same data:
bigger_history = bigger_model.fit(train_data,
                                  train_labels,
                                  epochs=20,
                                  batch_size=512,
                                  validation_data=(test_data, test_labels),
                                  verbose=2)

# ### Plot training and validation loss
# Solid lines show training loss, dashed lines validation loss (lower
# validation loss = better model). The smaller network starts overfitting later
# than the baseline (after ~6 epochs rather than ~4) and degrades more slowly
# once it does.

def plot_history(histories, key='binary_crossentropy'):
    """Plot train (solid) vs. validation (dashed) `key` curves for each (name, history) pair."""
    plt.figure(figsize=(16, 10))
    for name, history in histories:
        val = plt.plot(history.epoch, history.history['val_' + key],
                       '--', label=name.title() + ' Val')
        plt.plot(history.epoch, history.history[key],
                 color=val[0].get_color(), label=name.title() + ' Train')
    plt.xlabel('Epochs')
    plt.ylabel(key.replace('_', ' ').title())
    plt.legend()
    plt.xlim([0, max(history.epoch)])

plot_history([('baseline', baseline_history),
              ('smaller', smaller_history),
              ('bigger', bigger_history)])

# Note that the bigger network begins overfitting after only one epoch, and far
# more severely. More capacity models the training data faster (low training
# loss) but overfits more easily (large train/validation gap).

# ## Strategies
# ### Add weight regularization
# Occam's razor applies to models: given several weight configurations that fit
# the data, the "simplest" one — the one with the smallest-entropy weight
# distribution — tends to generalize best. Weight regularization constrains
# network complexity by adding a cost for large weights to the loss function,
# pushing the weight distribution toward small, "regular" values.
# Weight regularization adds a penalty on large weights to the loss. It comes
# in two flavors:
# * L1 regularization: cost proportional to the absolute value of each weight
#   coefficient (the "L1 norm" of the weights).
# * L2 regularization: cost proportional to the square of each weight
#   coefficient (the "L2 norm"); in neural-network contexts also called weight
#   decay — mathematically the same thing.
# In tf.keras this is done by passing a regularizer instance to a layer via
# `kernel_regularizer`. Add L2 weight regularization:

l2_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

l2_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy', 'binary_crossentropy'])

l2_model_history = l2_model.fit(train_data, train_labels,
                                epochs=20,
                                batch_size=512,
                                validation_data=(test_data, test_labels),
                                verbose=2)

# l2(0.001) means every weight coefficient adds 0.001 * weight_coefficient_value**2
# to the network's total loss. The penalty is only applied at training time, so
# the training-time loss is higher than the loss at test time.
# The effect of the L2 penalty:

plot_history([('baseline', baseline_history),
              ('l2', l2_model_history)])

# The L2-regularized model resists overfitting noticeably better than the
# baseline, even though both have the same number of parameters.

# ### Add Dropout
# Dropout — one of the most effective and widely used regularization techniques
# (Hinton et al., University of Toronto) — randomly "drops" (zeroes) a fraction
# of a layer's output values during training. E.g. [0.2, 0.5, 1.3, 0.8, 1.1]
# might become [0, 0.5, 1.3, 0, 1.1]. The "dropout rate" is the fraction
# dropped, usually between 0.2 and 0.5. At test time nothing is dropped;
# instead outputs are scaled down by the dropout rate to compensate for more
# units being active than at training time.
# In tf.keras, insert Dropout layers after the layers whose outputs they drop.
# Add two Dropout layers to the IMDB network and see how it does:
# +
dpt_model = keras.models.Sequential([
    keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(16, activation=tf.nn.relu),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

dpt_model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_crossentropy'])

dpt_model_history = dpt_model.fit(train_data, train_labels,
                                  epochs=20,
                                  batch_size=512,
                                  validation_data=(test_data, test_labels),
                                  verbose=2)

plot_history([('baseline', baseline_history),
              ('dropout', dpt_model_history)])

# Adding Dropout is a clear improvement over the baseline model.
#
# Recap — the common ways to prevent overfitting in neural networks:
# * Get more training data.
# * Reduce the capacity of the network.
# * Weight regularization.
# * Add Dropout.
# Two important methods not covered in this guide: data augmentation and batch
# normalization.
tutorials/keras/overfit_and_underfit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # ETOPO1 Earth Relief
# ===================
#
# ETOPO1 is a 1 arc-minute global relief model of Earth's surface that
# integrates land topography and ocean bathymetry [AmanteEakins2009]_. It's
# available in two versions: "Ice Surface" (top of Antarctic and Greenland ice
# sheets) and "Bedrock" (base of the ice sheets). The grids are loaded into
# :class:`xarray.Dataset` which can be used to plot and make computations.

# +
import rockhound as rh
import matplotlib.pyplot as plt
import cmocean

# Load a version of the topography grid
grid = rh.fetch_etopo1(version="bedrock")
print(grid)

# Select a subset that corresponds to Africa to make plotting faster given the
# size of the grid.
africa = grid.sel(latitude=slice(-40, 45), longitude=slice(-20, 60))

# Plot without a map projection to speed up plotting; this is NOT recommended
# in general.
plt.figure(figsize=(9, 8))
ax = plt.subplot(111)
africa.bedrock.plot.pcolormesh(
    cmap=cmocean.cm.topo, cbar_kwargs=dict(pad=0.01, aspect=30), ax=ax
)
ax.set_title("ETOPO1")
plt.tight_layout()
plt.show()
doc/gallery/etopo1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import math
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline


def load_data():
    """Load day/temperature pairs from time_temp_2016.tsv.

    Returns (train_x, test_x, train_y, test_y, all_x) with arrays laid out as
    (n_features, n_samples) — note the transposes at the end.
    """
    from sklearn.model_selection import train_test_split
    data = np.genfromtxt('time_temp_2016.tsv', delimiter='\t')
    x = data[:, 0]
    x = x.reshape((x.shape[0], 1))
    y = data[:, 1]
    train_set_x, test_set_x, train_set_y, test_set_y = train_test_split(
        x, y, test_size=0.33, random_state=42)
    train_set_y = train_set_y.reshape((1, train_set_y.shape[0]))
    test_set_y = test_set_y.reshape((1, test_set_y.shape[0]))
    return train_set_x.T, test_set_x.T, train_set_y, test_set_y, x.T


train_set_x, test_set_x, train_set_y, test_set_y, full_feature_set_for_plot = load_data()

print(train_set_x.shape, train_set_y.shape, test_set_x.shape, test_set_y.shape)

m_train = train_set_x.shape[1]
m_test = test_set_x.shape[1]
print("Number of training examples: m_train = " + str(m_train))
print("Number of testing examples: m_test = " + str(m_test))
print("\ntrain_set_x shape: " + str(train_set_x.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x shape: " + str(test_set_x.shape))
print("test_set_y shape: " + str(test_set_y.shape))

cmap = plt.get_cmap('viridis')
# Plot the results (x is a fraction of the year; scale by 366 to get days).
m1 = plt.scatter(366 * train_set_x, train_set_y, color=cmap(0.9), s=10)
m2 = plt.scatter(366 * test_set_x, test_set_y, color=cmap(0.5), s=10)
plt.xlabel('Day')
plt.ylabel('Temperature in Celcius')
plt.legend((m1, m2), ("Training data", "Test data"), loc='lower right')
plt.show()


def polynomial_features(X, degree):
    """Expand X of shape (n_features, n_samples) with all monomials up to `degree`.

    Rows of the result are products of feature rows; the degree-0 combination
    contributes a constant row of ones.
    """
    from itertools import combinations_with_replacement
    # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
    n_features, n_samples = np.shape(X)

    def index_combinations():
        # Features (1, 2), degree 2 -> [(), (1,), (2,), (1,1), (1,2), (2,2)]
        combs = [combinations_with_replacement(range(n_features), i)
                 for i in range(0, degree + 1)]
        return [item for sublist in combs for item in sublist]

    combinations = index_combinations()
    X_new = np.empty((len(combinations), n_samples))
    for i, index_combs in enumerate(combinations):
        # index_combs == (1, 2, 3) -> row i = X[1] * X[2] * X[3] element-wise
        X_new[i, :] = np.prod(X[index_combs, :], axis=0)
    return X_new


def mean_squared_error(y_true, y_pred):
    """Return the mean squared error between y_true and y_pred."""
    return np.sum((y_pred - y_true) ** 2) / y_true.squeeze().shape[0]


print("mse = " + str(mean_squared_error(np.array([1, 2, 3, 4]), np.array([2, 3, 4, 6]))))


class l2_regularization():
    """Regularization term for Ridge Regression."""

    def __init__(self, alpha):
        """Set alpha, the strength of the penalty."""
        self.alpha = alpha

    def __call__(self, w):
        """Return the L2 penalty value: 1/2 * alpha * norm(w)^2."""
        return np.sum(w ** 2) * self.alpha / 2

    def grad(self, w):
        """Return the gradient of the L2 penalty: alpha * w."""
        return self.alpha * w


l2 = l2_regularization(0.5)
print("l2 reg. term = " + str(l2(np.array([1, 2, 3, 4]))))
print("l2 grad. = " + str(l2.grad(np.array([1, 2, 3, 4]))))


class PolynomialRidgeRegression(object):
    """Polynomial regression with an L2 penalty, trained by batch gradient descent.

    Parameters:
    -----------
    degree: int
        The degree of the polynomial that the independent variable X will be
        transformed to.
    reg_factor: float
        The factor that will determine the amount of regularization and
        feature shrinkage.
    n_iterations: int
        The number of training iterations the algorithm will tune the weights for.
    learning_rate: float
        The step length that will be used when updating the weights.
    """

    def __init__(self, degree, reg_factor, n_iterations=3000,
                 learning_rate=0.01, print_error=False):
        self.degree = degree
        self.regularization = l2_regularization(alpha=reg_factor)
        self.n_iterations = n_iterations
        self.learning_rate = learning_rate
        self.print_error = print_error

    def initialize_with_zeros(self, n_features):
        """Create the weight vector of zeros with shape (n_features, 1)."""
        self.w = np.zeros((n_features, 1))

    def fit(self, X, Y):
        """Fit weights to (X, Y) with gradient descent on the ridge objective."""
        X = polynomial_features(X, self.degree)
        # Constant-ones row for the bias weight.
        X = np.vstack((np.ones((1, X.shape[1])), X))
        self.initialize_with_zeros(n_features=X.shape[0])
        for i in range(self.n_iterations):
            H = np.dot(self.w.T, X)                    # current predictions
            grad_w = self.regularization.grad(self.w)  # gradient of the L2 term
            deltas = H - Y
            self.w = self.w - self.learning_rate * (np.dot(X, deltas.T) + grad_w)
            if self.print_error and i % 1000 == 0:
                mse = mean_squared_error(Y, H)
                print("MSE after iteration %i: %f" % (i, mse))

    def predict(self, X):
        """Predict targets for X of shape (n_features, n_samples)."""
        X = polynomial_features(X, self.degree)
        X = np.vstack((np.ones((1, X.shape[1])), X))
        return np.dot(self.w.T, X)


poly_degree = 15
learning_rate = 0.001
n_iterations = 10000
reg_factor = 0.1

model = PolynomialRidgeRegression(
    degree=poly_degree,
    reg_factor=reg_factor,
    learning_rate=learning_rate,
    n_iterations=n_iterations,
    print_error=True
)

model.fit(train_set_x, train_set_y)

y_predictions = model.predict(test_set_x)

mse = mean_squared_error(test_set_y, y_predictions)

print("Mean squared error on test set: %s (given by reg. factor: %s)" % (mse, reg_factor))

print(f"Mean squared error on test set: {mse} (given by reg. factor: {reg_factor})")

cmap = plt.get_cmap('viridis')
# Predict for all points in set
y_val = model.predict(full_feature_set_for_plot)
# Plot the results
m1 = plt.scatter(366 * train_set_x, train_set_y, color=cmap(0.9), s=10)
m2 = plt.scatter(366 * test_set_x, test_set_y, color=cmap(0.5), s=10)
plt.plot(366 * full_feature_set_for_plot.T, y_val.T, color='black',
         linewidth=2, label="Prediction")
plt.suptitle("Polynomial Ridge Regression")
plt.title("MSE: %.2f" % mse, fontsize=10)
plt.xlabel('Day')
plt.ylabel('Temperature in Celcius')
plt.legend((m1, m2), ("Training data", "Test data"), loc='lower right')
plt.show()

# Apply scikit-learn models to the same time_temp_2016.tsv dataset.
train_set_x, test_set_x, train_set_y, test_set_y, full_feature_set_for_plot = load_data()
print(train_set_x.shape, train_set_y.shape, test_set_x.shape, test_set_y.shape)

# Convert to sklearn's layout: (n_samples, n_features) instead of
# (n_features, n_samples).
train_set_x = train_set_x.T
train_set_y = train_set_y.T
test_set_x = test_set_x.T
test_set_y = test_set_y.T
print(train_set_x.shape, train_set_y.shape, test_set_x.shape, test_set_y.shape)

from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures

degree = 15
poly = PolynomialFeatures(degree)
train_set_x = poly.fit_transform(train_set_x)
test_set_x = poly.fit_transform(test_set_x)

model = Ridge(alpha=0.1)
model.fit(train_set_x, train_set_y)
model.get_params()

# Return the coefficient of determination R^2 of the prediction.
model.score(train_set_x, train_set_y)

# Predictions on train and test sets
predictions_train = model.predict(train_set_x)
predictions_test = model.predict(test_set_x)

from sklearn.metrics import r2_score
r2_score(train_set_y, predictions_train)

# NOTE(review): this import shadows the hand-written mean_squared_error above —
# from here on the sklearn implementation is the one being called.
from sklearn.metrics import mean_squared_error
mean_squared_error(train_set_y, predictions_train)

mse = mean_squared_error(train_set_y, predictions_train)
mse

# MSE on the test set
mean_squared_error(test_set_y, predictions_test)

model.coef_

model.intercept_

# Regression with L1 regularization (Lasso)
from sklearn.linear_model import Lasso
model = Lasso(alpha=0.1)
model.fit(train_set_x, train_set_y)
model.get_params()

model.score(train_set_x, train_set_y)

# Regression with combined L1 and L2 regularization (ElasticNet)
from sklearn.linear_model import ElasticNet
model = ElasticNet(random_state=0, alpha=0.03, l1_ratio=0.99)
model.fit(train_set_x, train_set_y)
model.get_params()

model.score(train_set_x, train_set_y)
Polynomial_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: myconda # language: python # name: myconda # --- import tensorflow as tf import re import os import collections import numpy as np import codecs import collections import random from operator import itemgetter #GPU设置 config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.95 #占用95%显存 session = tf.Session(config=config) os.environ['CUDA_VISIBLE_DEVICES']="0" # ## 数据构建 def get_vocab(input_data,min_word_freq): counter = collections.Counter() with codecs.open(input_data,"r","utf-8") as f: for line in f: line = ' '.join(re.split(' |\t|\v|\n',line)) #将数据中的空格符统一,便于后期处理(原始数据中空格符包含\t、\v等) line = re.split('([: ,.(){}\[\]=])',line) #将字符串数据按照括号中的符号进行分割,分割成列表格式,并且在列表中保留分隔符 line = list(filter(lambda x: x!=' 'and x!='',line)) for word in line: counter[word] += 1 counter = filter(lambda x: x[1] > min_word_freq, counter.items()) sorted_word_to_cnt = sorted(counter,key=itemgetter(1),reverse=True) sorted_words = [x[0] for x in sorted_word_to_cnt] sorted_words = ["<UNK>","<GO>","<EOS>","<PAD>"] + sorted_words print("vocab_len: " + str(len(sorted_words))) return sorted_words input_data = "../00-data/tf_data.txt" min_word_freq = 3 vocab = get_vocab(input_data,min_word_freq) vocab_file = "../00-data/vocab" fout_vocab = codecs.open(vocab_file,"w","utf-8") for word in vocab: fout_vocab.write(word + "\n") fout_vocab.close() vocab # ### 【随机】前k句预测下一句 def clean_and_split(line): line = ' '.join(re.split(' |\t|\v|\n',line)) line = re.split('([: ,.(){}\[\]=])',line) line = list(filter(lambda x: x!=' 'and x!='',line)) return line def get_newdata_by_random(raw_data,source_train,source_test,target_train,target_test,rand_max=10,duplicate=1): import codecs import sys import re fout_source_train = codecs.open(source_train,"w","utf-8") fout_source_test = codecs.open(source_test,"w","utf-8") fout_target_train = 
# NOTE(review): this chunk begins mid-function — the `def get_newdata_by_random(...)`
# line and the first output-file handles are above this view. The dangling expression
# below is the tail of `fout_target_train = codecs.open(...)`.
codecs.open(target_train,"w","utf-8")
    fout_target_test = codecs.open(target_test,"w","utf-8")
    data_lists = []
    data_length = 0
    with open(raw_data,"r") as fin:
        lines = fin.readlines()
        for i in range(rand_max,len(lines)):
            # pick up to `duplicate` distinct context lengths for this target line
            rand_nums = set(random.randint(1,rand_max) for _ in range(duplicate))
            for rand_num in rand_nums:
                data_line = ""
                # build enc_data: the `rand_num` source lines preceding line i,
                # each wrapped in <GO> ... <EOS>
                words = []
                for j in range(i - rand_num,i):
                    line = clean_and_split(lines[j])
                    words += ["<GO>"] + line + ["<EOS>"]
                data_line += ' '.join(words)
                # build dec_data: line i itself, terminated by <EOS>;
                # " !@! " separates encoder and decoder halves in the flat record
                words = []
                line = clean_and_split(lines[i])
                words = line + ["<EOS>"]
                data_line += " !@! " + ' '.join(words)
                data_lists.append(data_line)
                data_length += 1
    random.shuffle(data_lists)
    # first 10% of the shuffled records become the test split
    test_nums = data_length // 10
    for i in range(test_nums):
        line = data_lists[i].split("!@!")
        fout_source_test.write(line[0].strip() + "\n")
        fout_target_test.write(line[1].strip() + "\n")
    for i in range(test_nums,data_length):
        line = data_lists[i].split("!@!")
        fout_source_train.write(line[0].strip() + "\n")
        fout_target_train.write(line[1].strip() + "\n")
    fout_source_train.close()
    fout_source_test.close()
    fout_target_train.close()
    fout_target_test.close()
    return data_length

source_train = "../00-data/beam_search_data/source_train"
source_test = "../00-data/beam_search_data/source_test"
target_train = "../00-data/beam_search_data/target_train"
target_test = "../00-data/beam_search_data/target_test"
data_length = get_newdata_by_random(input_data,source_train,source_test,target_train,target_test)
print("data_len: " + str(data_length))

# ### beam_search part

# +
import numpy as np
import os

base_dir = 'data/beam_search_data'


def open_file(filename, mode='r'):
    """Open a file as UTF-8 text, silently ignoring undecodable bytes."""
    return open(filename, mode, encoding='utf-8', errors='ignore')


def process_file(file_dir, letter_to_id):
    """Read one token-per-space file and map tokens to vocabulary ids.

    Returns a pair of (ragged) arrays: token-id sequences and their lengths.
    Unknown tokens fall back to the '<UNK>' id.
    """
    letter_ids = []
    len_ = []
    with open_file(file_dir) as f:
        for line in f:
            letter_id = []
            conts = line.strip().split(" ")
            for con in conts:
                letter_id.append(letter_to_id.get(con, letter_to_id['<UNK>']))
            letter_ids.append(letter_id)
            len_.append(len(letter_id))
    return np.array(letter_ids), np.array(len_)


def read_vocab(vocab_dir):
    """Read the vocabulary file (one token per line) into forward/backward maps."""
    words = open_file(vocab_dir).read().strip().split('\n')
    word_to_id = dict(zip(words, range(len(words))))
    id_to_word = {idx: word for word, idx in word_to_id.items()}
    return word_to_id, id_to_word


def pad_sentence_batch(sentence_batch, pad_int):
    """Pad every sequence in the batch with `pad_int` up to the batch maximum length."""
    max_sentence = max([len(sentence) for sentence in sentence_batch])
    return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]


def clip_batch(sources_batch, targets_batch, source_pad_int, target_pad_int):
    """Pad a (source, target) batch and return padded arrays plus original lengths."""
    pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
    pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
    # record the original (unpadded) length of every sequence
    targets_lengths = []
    for target in targets_batch:
        targets_lengths.append(len(target))
    source_lengths = []
    for source in sources_batch:
        source_lengths.append(len(source))
    return pad_sources_batch, source_lengths, pad_targets_batch, targets_lengths


# def batch_iter(source_train, len_source_train, target_train, len_target_train, batch_size, source_pad_int, target_pad_int):
def batch_iter(source_train, target_train, batch_size, source_pad_int, target_pad_int):
    """Yield padded (source, len, target, len) batches of at most `batch_size` records."""
    data_len = len(source_train)
    num_batch = int((data_len - 1) / batch_size) + 1
    for i in range(num_batch):
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        yield clip_batch(source_train[start_id:end_id], target_train[start_id:end_id], source_pad_int, target_pad_int)
# -

# ## Model construction

# +
from tensorflow.python.layers.core import Dense


class ModelConfig(object):
    """Hyper-parameters for the seq2seq beam-search model.

    NOTE(review): the original docstring said "CNN config"; this config is used
    by the attention seq2seq model below.
    """
    print_per_batch = 10  # report metrics every N batches
    num_epochs = 30
    batch_size = 256
    rnn_size = 256
    num_layers = 2
    encoding_embedding_size = 256
    decoding_embedding_size = 256
    learning_rate_base = 0.003
    learning_rate_decay = 0.95  # original note: probably better set to 0.9
    beam_width = 5


def get_multi_rnn_cell(rnn_size, num_layers):
    # NOTE(review): despite the name and the `num_layers` parameter, this
    # returns a single LSTM cell, not a MultiRNNCell.
    return tf.contrib.rnn.LSTMCell(rnn_size,
                                   initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))


class Model(object):
    """Attention seq2seq model with beam-search decoding.

    NOTE(review): the original docstring said "text classification, CNN model";
    the graph below is an encoder/decoder with Luong attention and a
    BeamSearchDecoder for inference.
    """

    def __init__(self, config):
        self.config = config
        # inputs
        self.source = tf.placeholder(tf.int32, [None, None], name='source')
        self.target = tf.placeholder(tf.int32, [None, None], name='target')
        self.target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
        self.source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
        self.global_step = tf.placeholder(tf.int32, name='global_step')

        # 1. encoder
        source_embedding = tf.get_variable('source_embedding',
                                           [self.config.source_vocab_size, self.config.encoding_embedding_size])
        source_embedding_inputs = tf.nn.embedding_lookup(source_embedding, self.source)
        # bi-LSTM: each direction gets half of rnn_size so the concatenated
        # state matches the decoder's rnn_size
        fw_lstm_cell = tf.contrib.rnn.LSTMCell(int(self.config.rnn_size / 2),
                                               initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        bw_lstm_cell = tf.contrib.rnn.LSTMCell(int(self.config.rnn_size / 2),
                                               initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        (outputs, (fw_state, bw_state)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm_cell,
                                                                          cell_bw=bw_lstm_cell,
                                                                          inputs=source_embedding_inputs,
                                                                          sequence_length=self.source_sequence_length,
                                                                          dtype=tf.float32)
        encoder_output = tf.concat(outputs, -1)
        encoder_final_state_c = tf.concat([fw_state.c, bw_state.c], -1)
        encoder_final_state_h = tf.concat([fw_state.h, bw_state.h], -1)
        encoder_state = tf.contrib.rnn.LSTMStateTuple(
            c=encoder_final_state_c,
            h=encoder_final_state_h
        )

        # 2. decoder
        # drop the last target token and prepend <GO> to form the decoder input
        ending = tf.strided_slice(self.target, [0, 0], [self.config.batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([tf.shape(self.target)[0], 1],
                                           self.config.target_letter_to_id['<GO>']), ending], 1)
        target_embedding = tf.get_variable('target_embedding',
                                           [self.config.target_vocab_size, self.config.decoding_embedding_size])
        target_embedding_inputs = tf.nn.embedding_lookup(target_embedding, decoder_input)
        decoder_cell = get_multi_rnn_cell(self.config.rnn_size, self.config.num_layers)
        output_layer = Dense(self.config.target_vocab_size,
                             kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

        # training phase
        with tf.variable_scope("decode"):
            # attention
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.config.rnn_size,
                                                                    encoder_output,
                                                                    memory_sequence_length=self.source_sequence_length)
            attention_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell,
                                                                         attention_mechanism,
                                                                         attention_layer_size=self.config.rnn_size)
            # initial_state
            initial_state = attention_decoder_cell.zero_state(tf.shape(self.source)[0],
                                                              tf.float32).clone(cell_state=encoder_state)
            training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=target_embedding_inputs,
                                                                sequence_length=self.target_sequence_length)
            training_decoder = tf.contrib.seq2seq.BasicDecoder(
                attention_decoder_cell,
                training_helper,
                initial_state,  # initialize the attention decoder from the encoder's final state; passing encoder_state directly would raise an error
                output_layer)
            training_decoder_output, _, __ = tf.contrib.seq2seq.dynamic_decode(
                training_decoder,
                impute_finished=True,  # stop at EOS: outputs after EOS are zeroed, state frozen at last valid time step
                maximum_iterations=None)  # max decoding steps; can be None during training since the target sequence is fed in
            self.logits = training_decoder_output.rnn_output

        # inference phase
        with tf.variable_scope("decode", reuse=True):
            # a. decoder_attention
            bs_encoder_output = tf.contrib.seq2seq.tile_batch(encoder_output, multiplier=self.config.beam_width)  # tile_batch = replicate beam_width times, then concat(..., 0)
            bs_sequence_length = tf.contrib.seq2seq.tile_batch(self.source_sequence_length,
                                                               multiplier=self.config.beam_width)
            bs_attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.config.rnn_size,
                                                                       bs_encoder_output,
                                                                       memory_sequence_length=bs_sequence_length)
            bs_attention_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell,
                                                                            bs_attention_mechanism,
                                                                            attention_layer_size=self.config.rnn_size)
            # b. decoder_initial_state
            bs_cell_state = tf.contrib.seq2seq.tile_batch(encoder_state, multiplier=self.config.beam_width)
            bs_initial_state = bs_attention_decoder_cell.zero_state(
                tf.shape(self.source)[0] * self.config.beam_width,
                tf.float32).clone(cell_state=bs_cell_state)
            predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell=bs_attention_decoder_cell,
                embedding=target_embedding,
                start_tokens=tf.fill([tf.shape(self.source)[0]], self.config.target_letter_to_id['<GO>']),
                end_token=self.config.target_letter_to_id['<EOS>'],
                initial_state=bs_initial_state,
                beam_width=self.config.beam_width,
                output_layer=output_layer,
                length_penalty_weight=0.0)  # penalty on shorter outputs; 0.0 means no penalty
            predicting_decoder_output, _, __ = tf.contrib.seq2seq.dynamic_decode(
                predicting_decoder,
                impute_finished=False,  # stop emitting at EOS (state frozen at the last valid time step)
                maximum_iterations=tf.round(tf.reduce_max(self.source_sequence_length) * 2))  # EOS timing is unknown at inference, so cap the number of time steps
            self.result_ids = tf.transpose(predicting_decoder_output.predicted_ids,
                                           perm=[0, 2, 1])  # target vocab ids: [batch_size, beam_width, max_time_step]
            #print(tf.trainable_variables())

        # 3. optimize
        masks = tf.sequence_mask(self.target_sequence_length,
                                 tf.reduce_max(self.target_sequence_length), dtype=tf.float32)
        self.loss = tf.contrib.seq2seq.sequence_loss(self.logits, self.target, masks)
        self.learning_rate = tf.train.exponential_decay(
            self.config.learning_rate_base,
            self.global_step,
            data_length / self.config.batch_size,  # data_length is the dataset size (module-level global)
            self.config.learning_rate_decay,
            staircase=True)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        # gradient clipping
        gradients = optimizer.compute_gradients(self.loss)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        self.train_op = optimizer.apply_gradients(capped_gradients)
# -

# ## Training

# +
import time
from datetime import timedelta
import sys

base_dir = '../00-data/beam_search_data'
source_train_dir = os.path.join(base_dir, 'source_train')
source_test_dir = os.path.join(base_dir, 'source_test')
target_train_dir = os.path.join(base_dir, 'target_train')
target_test_dir = os.path.join(base_dir, 'target_test')
source_vocab_dir = os.path.join(base_dir, 'vocab')
target_vocab_dir = os.path.join(base_dir, 'vocab')

save_dir = '../02-checkpoints/03-0322'
save_path = os.path.join(save_dir, 'best_validation')  # save path for the best validation checkpoint


def get_time_dif(start_time):
    """Return the elapsed wall-clock time since `start_time` as a timedelta."""
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))


def evaluate(sess, source_test, target_test, total_batch):
    """Return the average loss over a dataset.

    NOTE(review): relies on module-level globals `config`, `model`,
    `source_letter_to_id` and `target_letter_to_id` set in __main__.
    """
    data_len = len(source_test)
    batch_eval = batch_iter(source_test, target_test, config.batch_size,
                            source_letter_to_id['<PAD>'], target_letter_to_id['<PAD>'])
    total_loss = 0.
    for source_train_batch, len_source_train_batch, target_train_batch, len_target_train_batch in batch_eval:
        batch_len = len(source_train_batch)
        feed_dict = {
            model.source: source_train_batch,
            model.target: target_train_batch,
            model.source_sequence_length: len_source_train_batch,
            model.target_sequence_length: len_target_train_batch,
            model.global_step: total_batch
            # model.is_train: True
        }
        loss = sess.run([model.loss], feed_dict=feed_dict)
        loss = np.mean(loss)
        total_loss += loss * batch_len
    return total_loss / data_len


def train():
    """Train the model, periodically evaluating and checkpointing the best result."""
    # configure Saver
    saver = tf.train.Saver()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # load the training and validation sets
    print("Loading data...")
    source_train, len_source_train = process_file(source_train_dir, source_letter_to_id)
    source_test, len_source_test = process_file(source_test_dir, source_letter_to_id)
    target_train, len_target_train = process_file(target_train_dir, target_letter_to_id)
    target_test, len_target_test = process_file(target_test_dir, target_letter_to_id)

    # create session
    session = tf.Session()
    session.run(tf.global_variables_initializer())

    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed
    best_val_loss = sys.float_info.max  # best validation loss so far
    last_improved = 0  # batch at which the last improvement happened
    require_improvement = 100  # early-stop threshold (original comment said 1000; value here is 100)
    flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(source_train, target_train, config.batch_size,
                                 source_letter_to_id['<PAD>'], target_letter_to_id['<PAD>'])
        for source_train_batch, len_source_train_batch, target_train_batch, len_target_train_batch in batch_train:
            feed_dict = {
                model.source: source_train_batch,
                model.target: target_train_batch,
                model.source_sequence_length: len_source_train_batch,
                model.target_sequence_length: len_target_train_batch,
                model.global_step: total_batch
                # model.is_train: True
            }
            if total_batch % config.print_per_batch == 0:
                # report train/validation performance every print_per_batch batches
                loss_train = np.mean(session.run([model.loss], feed_dict=feed_dict))
                loss_val = evaluate(session, source_test, target_test, total_batch)
                if loss_val < best_val_loss:
                    # save the best result
                    best_val_loss = loss_val
                    last_improved = total_batch
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''
                time_dif = get_time_dif(start_time)
                lr = session.run([model.learning_rate], feed_dict={model.global_step: total_batch})
                #print(lr)
                msg = 'total_batch: {0:>5}, Train Loss: {1:.5f}, Val Loss: {2:.5f}, Learning Rate:{3:.5f}, Time: {4} {5}'
                print(msg.format(total_batch, loss_train, loss_val, lr[0], time_dif, improved_str))

            session.run(model.train_op, feed_dict=feed_dict)  # run one optimization step
            total_batch += 1

            # if total_batch - last_improved > require_improvement:
            #     # validation loss has not improved for a long time: stop early
            #     print("No optimization for a long time, auto-stopping...")
            #     flag = True
            #     break
        # if flag:  # same early-stop as above
        #     break


if __name__ == '__main__':
    tf.reset_default_graph()
    print('Configuring model...')
    config = ModelConfig()
    source_letter_to_id, source_id_to_letter = read_vocab(source_vocab_dir)
    target_letter_to_id, target_id_to_letter = read_vocab(target_vocab_dir)
    config.source_vocab_size = len(source_letter_to_id)
    config.target_vocab_size = len(target_letter_to_id)
    config.target_letter_to_id = target_letter_to_id
    model = Model(config)
    train()
# -

# ## Testing

def process_predict_input(test_sentences, letter_to_id):
    """Tokenize raw code lines and map them to a single flat id sequence.

    Every sentence is wrapped in <GO> ... <EOS>; unknown tokens map to <UNK>.
    Returns ([ids], [length]) shaped for a batch of one.
    NOTE(review): requires the `re` module, presumably imported above this chunk.
    """
    test_vector = []
    test_text = ""  # NOTE(review): unused
    for sentence in test_sentences:
        line = ' '.join(re.split(' |\t|\v|\n', sentence))
        line = re.split('([: ,.(){}\[\]=])', line)
        line = list(filter(lambda x: x != ' ' and x != '', line))
        test_vector.append(letter_to_id["<GO>"])
        for word in line:
            test_vector.append(letter_to_id.get(word, letter_to_id["<UNK>"]))
        test_vector.append(letter_to_id["<EOS>"])
    return [test_vector], [len(test_vector)]


def test():
    """Restore the best checkpoint and beam-search decode a few sample inputs."""
    print("Testing...")
    test_sentences = ["logits = tf.matmul(output,self.softmax_weight) + self.softmax_bias",
                      "loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(trg_label,[-1]),logits=logits)",
                      "label_weights = tf.sequence_mask(trg_size,maxlen=tf.shape(trg_label)[1],dtype=tf.float32)",
                      "label_weights = tf.reshape(label_weights,[-1])"]
    source_test, len_source_test = process_predict_input(test_sentences, source_letter_to_id)
    print(source_test)

    session = tf.Session()
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess=session, save_path=save_path)  # restore the saved model

    feed_dict = {
        model.source: source_test,
        model.source_sequence_length: len_source_test,
        # model.target_sequence_length: len_source_test,
        # model.is_train: False
    }
    result_ids = session.run(model.result_ids, feed_dict=feed_dict)
    # print('输出: {}'.format("".join([target_id_to_letter[i] for i in result_ids])))
    # beam search output
    print(result_ids.shape)
    result_ids = np.squeeze(result_ids)
    print(result_ids.shape)
    for x in result_ids:
        res = []
        for i in x:
            if target_id_to_letter[i] == "<EOS>":
                break
            res.append(target_id_to_letter[i])
        print(" ".join(res))

test()
model/02-Seq2Seq/.ipynb_checkpoints/beam_search_demo-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.3 64-bit (''flim'': conda)'
#     metadata:
#       interpreter:
#         hash: d56923169773f93e593b03c3dcee9f5b1a8d57471a265acfdbc0de17c63918a7
#     name: python3
# ---

# Example: build a LIDSConvNet feature extractor with flim and save the
# intermediate layer outputs of a dataset as MImages and as an OPF dataset.

# +
import flim
from flim.experiments import utils, LIDSDataset, ToTensor
from flim.models.lcn import LCNCreator

import torch

import matplotlib.pyplot as plt
# -

# Pick the compute device: prefer the first GPU when available.
if torch.cuda.is_available():
    device = 'cuda:0'
else:
    # BUG FIX: was `deice = 'cpu'`, which left `device` undefined (NameError
    # below) on CPU-only machines.
    device = 'cpu'

# load architecture
architecture = utils.load_architecture('arch.json')

# load image and markers
images, markers = utils.load_images_and_markers('markers')

# +
# create the model from the architecture spec and the marked images
creator = LCNCreator(architecture,
                     images=images,
                     markers=markers,
                     relabel_markers=False,
                     device=device)

creator.build_feature_extractor()

model = creator.get_LIDSConvNet()
# -

# Define train set. It should be a folder with images named as 0000{label}_0000{imge_number}.{image_format}
# You can create a .txt file with the name of the images in the train set and another file with the name of the images in test set
trainset = LIDSDataset('dataset', 'test.txt', transform=ToTensor())

# +
# Save as MImages

# + tags=[]
utils.save_intermediate_outputs(model,
                                trainset,
                                "outputs",
                                format="mimg",
                                layers=["pool", "activation"],
                                device=device)
# -

# inspect the first channel of one saved intermediate output
image = utils.load_mimage('outputs/intermediate-outputs/conv1/0001_0001.mimg')

plt.imshow(image[:, :, 0])

# +
# Save as OPF Dataset
# -

# CONSISTENCY FIX: keyword was `layer=`, but save_intermediate_outputs is
# called with `layers=` above — use the same keyword here.
utils.save_intermediate_outputs(model,
                                trainset,
                                "outputs",
                                format="zip",
                                layers=["conv1"],
                                device=device)

opf_dataset = utils.load_opf_dataset('outputs/intermediate-outputs/conv1/dataset.zip')
example/save_output_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + cellView="form" id="upmJn_DjcThx" #@title ###### Licensed to the Apache Software Foundation (ASF), Version 2.0 (the "License") # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # + [markdown] id="5UC_aGanx6oE" # # Reading and writing data -- _Tour of Beam_ # # So far we've learned some of the basic transforms like # [`Map`](https://beam.apache.org/documentation/transforms/python/elementwise/map), # [`FlatMap`](https://beam.apache.org/documentation/transforms/python/elementwise/flatmap), # [`Filter`](https://beam.apache.org/documentation/transforms/python/elementwise/filter), # [`Combine`](https://beam.apache.org/documentation/transforms/python/aggregation/combineglobally), and # [`GroupByKey`](https://beam.apache.org/documentation/transforms/python/aggregation/groupbykey). # These allow us to transform data in any way, but so far we've used # [`Create`](https://beam.apache.org/documentation/transforms/python/other/create) # to get data from an in-memory # [`iterable`](https://docs.python.org/3/glossary.html#term-iterable), like a `list`. 
# # This works well for experimenting with small datasets. For larger datasets we can use `Source` transforms to read data and `Sink` transforms to write data. # If there are no built-in `Source` or `Sink` transforms, we can also easily create our custom I/O transforms. # # Let's create some data files and see how we can read them in Beam. # + id="R_Yhoc6N_Flg" outputId="94144efc-6b65-4eb7-ea72-23fe3e192428" colab={"base_uri": "https://localhost:8080/"} # Install apache-beam with pip. # !pip install --quiet apache-beam # Create a directory for our data files. # !mkdir -p data # + id="sQUUi4H9s-g2" outputId="da02ce82-61ce-43e2-a5f0-21b06df76bd6" colab={"base_uri": "https://localhost:8080/"} # %%writefile data/my-text-file-1.txt This is just a plain text file, UTF-8 strings are allowed 🎉. Each line in the file is one element in the PCollection. # + id="BWVVeTSOlKug" outputId="2f0ad045-0af3-4ca7-f5a3-1f348b5c1517" colab={"base_uri": "https://localhost:8080/"} # %%writefile data/my-text-file-2.txt There are no guarantees on the order of the elements. 
ฅ^•ﻌ•^ฅ

# + id="NhCws6ncbDJG" outputId="0b5e0dc6-43ee-4786-c1d5-984fd422e445" colab={"base_uri": "https://localhost:8080/"}
# %%writefile data/penguins.csv
species,culmen_length_mm,culmen_depth_mm,flipper_length_mm,body_mass_g
0,0.2545454545454545,0.6666666666666666,0.15254237288135594,0.2916666666666667
0,0.26909090909090905,0.5119047619047618,0.23728813559322035,0.3055555555555556
1,0.5236363636363636,0.5714285714285713,0.3389830508474576,0.2222222222222222
1,0.6509090909090909,0.7619047619047619,0.4067796610169492,0.3333333333333333
2,0.509090909090909,0.011904761904761862,0.6610169491525424,0.5
2,0.6509090909090909,0.38095238095238104,0.9830508474576272,0.8333333333333334

# + [markdown] id="_OkWHiAvpWDZ"
# # Reading from text files
#
# We can use the
# [`ReadFromText`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.textio.html#apache_beam.io.textio.ReadFromText)
# transform to read text files into `str` elements.
#
# It takes a
# [_glob pattern_](https://en.wikipedia.org/wiki/Glob_%28programming%29)
# as an input, and reads all the files that match that pattern.
# It returns one element for each line in the file.
#
# For example, in the pattern `data/*.txt`, the `*` is a wildcard that matches anything. This pattern matches all the files in the `data/` directory with a `.txt` extension.

# + colab={"base_uri": "https://localhost:8080/"} id="xDXdE9uysriw" outputId="3ef9f7a9-8291-42a1-be03-71e50de266f5"
import apache_beam as beam

# Glob pattern matching every .txt file created above; each matched file
# contributes one element per line to the PCollection.
input_files = 'data/*.txt'
with beam.Pipeline() as pipeline:
  (
      pipeline
      | 'Read files' >> beam.io.ReadFromText(input_files)
      | 'Print contents' >> beam.Map(print)
  )

# + [markdown] id="9-2wmzEWsdrb"
# # Writing to text files
#
# We can use the
# [`WriteToText`](https://beam.apache.org/releases/pydoc/2.27.0/apache_beam.io.textio.html#apache_beam.io.textio.WriteToText) transform to write `str` elements into text files.
# # It takes a _file path prefix_ as an input, and it writes the all `str` elements into one or more files with filenames starting with that prefix. You can optionally pass a `file_name_suffix` as well, usually used for the file extension. Each element goes into its own line in the output files. # + id="nkPlfoTfz61I" import apache_beam as beam output_file_name_prefix = 'outputs/file' with beam.Pipeline() as pipeline: ( pipeline | 'Create file lines' >> beam.Create([ 'Each element must be a string.', 'It writes one element per line.', 'There are no guarantees on the line order.', 'The data might be written into multiple files.', ]) | 'Write to files' >> beam.io.WriteToText( output_file_name_prefix, file_name_suffix='.txt') ) # + colab={"base_uri": "https://localhost:8080/"} id="8au0yJSd1itt" outputId="4822458b-2724-42e9-c71f-280a82d505d6" # Lets look at the output files and contents. # !head outputs/file*.txt # + [markdown] id="21CCdZispqYK" # # Reading data # # Your data might reside in various input formats. Take a look at the # [Built-in I/O Transforms](https://beam.apache.org/documentation/io/built-in) # page for a list of all the available I/O transforms in Beam. # # If none of those work for you, you might need to create your own input transform. # # > ℹ️ For a more in-depth guide, take a look at the # [Developing a new I/O connector](https://beam.apache.org/documentation/io/developing-io-overview) page. # + [markdown] id="7dQEym1QRG4y" # ## Reading from an `iterable` # # The easiest way to create elements is using # [`FlatMap`](https://beam.apache.org/documentation/transforms/python/elementwise/flatmap). # # A common way is having a [`generator`](https://docs.python.org/3/glossary.html#term-generator) function. This could take an input and _expand_ it into a large amount of elements. 
# The nice thing about `generator`s is that they don't have to fit everything into memory like a `list`, they simply
# [`yield`](https://docs.python.org/3/reference/simple_stmts.html#yield)
# elements as they process them.
#
# For example, let's define a `generator` called `count`, that `yield`s the numbers from `0` to `n`. We use `Create` for the initial `n` value(s) and then expand them with `FlatMap`.

# + colab={"base_uri": "https://localhost:8080/"} id="wR6WY6wOMVhb" outputId="f90ae0a1-0cb4-4f25-fa93-10f9de856a95"
import apache_beam as beam
from typing import Iterable


def count(n: int) -> Iterable[int]:
  """Lazily yield the integers 0, 1, ..., n-1."""
  for i in range(n):
    yield i


n = 5
with beam.Pipeline() as pipeline:
  (
      pipeline
      | 'Create inputs' >> beam.Create([n])
      # FlatMap expands the single element n into n elements via the generator.
      | 'Generate elements' >> beam.FlatMap(count)
      | 'Print elements' >> beam.Map(print)
  )

# + [markdown] id="G4fw7NE1RQNf"
# ## Creating an input transform
#
# For a nicer interface, we could abstract the `Create` and the `FlatMap` into a custom `PTransform`. This would give a more intuitive way to use it, while hiding the inner workings.
#
# We need a new class that inherits from `beam.PTransform`. We can do this more conveniently with the
# [`beam.ptransform_fn`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.ptransform.html#apache_beam.transforms.ptransform.ptransform_fn) decorator.
#
# The `PTransform` function takes the input `PCollection` as the first argument, and any other inputs from the generator function, like `n`, can be arguments to the `PTransform` as well. The original generator function can be defined locally within the `PTransform`.
# Finally, we apply the `Create` and `FlatMap` transforms and return a new `PCollection`.
# # We can also, optionally, add type hints with the [`with_input_types`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.ptransform.html#apache_beam.transforms.ptransform.PTransform.with_input_types) and [`with_output_types`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.ptransform.html#apache_beam.transforms.ptransform.PTransform.with_output_types) decorators. They serve both as documentation and as a way to ensure your data types are consistent throughout your pipeline. This becomes more useful as the complexity grows.
#
# Since our `PTransform` is expected to be the first transform in the pipeline, it doesn't receive any inputs. We can mark it as the beginning with the [`PBegin`](https://beam.apache.org/releases/pydoc/current/_modules/apache_beam/pvalue.html) type hint.
#
# Finally, to enable type checking, you can pass `--type_check_additional=all` when running your pipeline. Alternatively, you can also pass it directly to `PipelineOptions` if you want type checking enabled by default. To learn more about pipeline options, see [Configuring pipeline options](https://beam.apache.org/documentation/programming-guide/#configuring-pipeline-options).
# + colab={"base_uri": "https://localhost:8080/"} id="m8iXqE1CRnn5" outputId="d77fd363-76eb-49ce-8729-82d6cd38cfda" import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions from typing import Iterable @beam.ptransform_fn @beam.typehints.with_input_types(beam.pvalue.PBegin) @beam.typehints.with_output_types(int) def Count(pbegin: beam.pvalue.PBegin, n: int) -> beam.PCollection[int]: def count(n: int) -> Iterable[int]: for i in range(n): yield i return ( pbegin | 'Create inputs' >> beam.Create([n]) | 'Generate elements' >> beam.FlatMap(count) ) n = 5 options = PipelineOptions(flags=[], type_check_additional='all') with beam.Pipeline(options=options) as pipeline: ( pipeline | f'Count to {n}' >> Count(n) | 'Print elements' >> beam.Map(print) ) # + [markdown] id="e02_vFmUg-mK" # ## Example: Reading CSV files # # Lets say we want to read CSV files to get elements as Python dictionaries. We like how `ReadFromText` expands a file pattern, but we might want to allow for multiple patterns as well. # # We create a `ReadCsvFiles` transform, which takes a list of `file_patterns` as input. It expands all the `glob` patterns, and then, for each file name it reads each row as a `dict` using the # [`csv.DictReader`](https://docs.python.org/3/library/csv.html#csv.DictReader) module. # # We could use the [`open`](https://docs.python.org/3/library/functions.html#open) function to open a local file, but Beam already supports several different file systems besides local files. # To leverage that, we can use the [`apache_beam.io.filesystems`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.filesystems.html) module. # # > ℹ️ The [`open`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.filesystems.html#apache_beam.io.filesystems.FileSystems.open) # > function from the Beam filesystem reads bytes, # > it's roughly equivalent to opening a file in `rb` mode. 
# > To write a file, you would use # > [`create`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.filesystems.html#apache_beam.io.filesystems.FileSystems.open) instead. # + colab={"base_uri": "https://localhost:8080/"} id="ywVbJxegaZbo" outputId="8dd0fdf3-43e8-47db-8442-ed9e88ef6c95" import apache_beam as beam from apache_beam.io.filesystems import FileSystems as beam_fs from apache_beam.options.pipeline_options import PipelineOptions import codecs import csv from typing import Dict, Iterable, List @beam.ptransform_fn @beam.typehints.with_input_types(beam.pvalue.PBegin) @beam.typehints.with_output_types(Dict[str, str]) def ReadCsvFiles(pbegin: beam.pvalue.PBegin, file_patterns: List[str]) -> beam.PCollection[Dict[str, str]]: def expand_pattern(pattern: str) -> Iterable[str]: for match_result in beam_fs.match([pattern])[0].metadata_list: yield match_result.path def read_csv_lines(file_name: str) -> Iterable[Dict[str, str]]: with beam_fs.open(file_name) as f: # Beam reads files as bytes, but csv expects strings, # so we need to decode the bytes into utf-8 strings. for row in csv.DictReader(codecs.iterdecode(f, 'utf-8')): yield dict(row) return ( pbegin | 'Create file patterns' >> beam.Create(file_patterns) | 'Expand file patterns' >> beam.FlatMap(expand_pattern) | 'Read CSV lines' >> beam.FlatMap(read_csv_lines) ) input_patterns = ['data/*.csv'] options = PipelineOptions(flags=[], type_check_additional='all') with beam.Pipeline(options=options) as pipeline: ( pipeline | 'Read CSV files' >> ReadCsvFiles(input_patterns) | 'Print elements' >> beam.Map(print) ) # + [markdown] id="ZyzB_RO9Vs1D" # ## Example: Reading from a SQLite database # # Lets begin by creating a small SQLite local database file. # # Run the _"Creating the SQLite database"_ cell to create a new SQLite3 database with the filename you choose. You can double-click it to see the source code if you want. 
# + colab={"base_uri": "https://localhost:8080/"} id="EJ58A0AoV02o" cellView="form" outputId="f932e834-8d65-4ddc-a4f8-1fc825c30b41" #@title Creating the SQLite database import sqlite3 database_file = "moon-phases.db" #@param {type:"string"} with sqlite3.connect(database_file) as db: cursor = db.cursor() # Create the moon_phases table. cursor.execute(''' CREATE TABLE IF NOT EXISTS moon_phases ( id INTEGER PRIMARY KEY, phase_emoji TEXT NOT NULL, peak_datetime DATETIME NOT NULL, phase TEXT NOT NULL)''') # Truncate the table if it's already populated. cursor.execute('DELETE FROM moon_phases') # Insert some sample data. insert_moon_phase = 'INSERT INTO moon_phases(phase_emoji, peak_datetime, phase) VALUES(?, ?, ?)' cursor.execute(insert_moon_phase, ('🌕', '2017-12-03 15:47:00', 'Full Moon')) cursor.execute(insert_moon_phase, ('🌗', '2017-12-10 07:51:00', 'Last Quarter')) cursor.execute(insert_moon_phase, ('🌑', '2017-12-18 06:30:00', 'New Moon')) cursor.execute(insert_moon_phase, ('🌓', '2017-12-26 09:20:00', 'First Quarter')) cursor.execute(insert_moon_phase, ('🌕', '2018-01-02 02:24:00', 'Full Moon')) cursor.execute(insert_moon_phase, ('🌗', '2018-01-08 22:25:00', 'Last Quarter')) cursor.execute(insert_moon_phase, ('🌑', '2018-01-17 02:17:00', 'New Moon')) cursor.execute(insert_moon_phase, ('🌓', '2018-01-24 22:20:00', 'First Quarter')) cursor.execute(insert_moon_phase, ('🌕', '2018-01-31 13:27:00', 'Full Moon')) # Query for the data in the table to make sure it's populated. cursor.execute('SELECT * FROM moon_phases') for row in cursor.fetchall(): print(row) # + [markdown] id="8y-bRhPVWai6" # We could use a `FlatMap` transform to receive a SQL query and `yield` each result row, but that would mean creating a new database connection for each query. If we generated a large number of queries, creating that many connections could be a bottleneck. 
# # It would be nice to create the database connection only once for each worker, and every query could use the same connection if needed.
#
# We can use a
# [custom `DoFn` transform](https://beam.apache.org/documentation/transforms/python/elementwise/pardo/#example-3-pardo-with-dofn-methods)
# for this. It allows us to open and close resources, like the database connection, only _once_ per `DoFn` _instance_ by using the `setup` and `teardown` methods.
#
# > ℹ️ It should be safe to _read_ from a database with multiple concurrent processes using the same connection, but only one process should be _writing_ at once.

# + colab={"base_uri": "https://localhost:8080/"} id="Bnpwqr-NV5DF" outputId="5f69a99c-c711-47cf-f13a-c780de57f3e6"
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import sqlite3
# FIX: `Dict` is used in the annotations below but was not imported in this
# cell (NameError when the cell runs standalone) — add it to the import.
from typing import Dict, Iterable, List, Tuple


class SQLiteSelect(beam.DoFn):
  """DoFn that opens one SQLite connection per instance and runs SELECT queries.

  The connection is created in `setup` and closed in `teardown`, so it is
  reused across all elements processed by the same DoFn instance.
  """

  def __init__(self, database_file: str):
    self.database_file = database_file
    self.connection = None  # created lazily in setup()

  def setup(self):
    # Called once per DoFn instance, before processing any elements.
    self.connection = sqlite3.connect(self.database_file)

  def process(self, query: Tuple[str, List[str]]) -> Iterable[Dict[str, str]]:
    """Run one (table, columns) SELECT and yield each row as a dict."""
    table, columns = query
    cursor = self.connection.cursor()
    cursor.execute(f"SELECT {','.join(columns)} FROM {table}")
    for row in cursor.fetchall():
      yield dict(zip(columns, row))

  def teardown(self):
    self.connection.close()


@beam.ptransform_fn
@beam.typehints.with_input_types(beam.pvalue.PBegin)
@beam.typehints.with_output_types(Dict[str, str])
def SelectFromSQLite(
    pbegin: beam.pvalue.PBegin,
    database_file: str,
    queries: List[Tuple[str, List[str]]],
) -> beam.PCollection[Dict[str, str]]:
  """Source transform: run the given (table, columns) queries against SQLite."""
  return (
      pbegin
      | 'Create None' >> beam.Create(queries)
      | 'SQLite SELECT' >> beam.ParDo(SQLiteSelect(database_file))
  )


queries = [
    # (table_name, [column1, column2, ...])
    ('moon_phases', ['phase_emoji', 'peak_datetime', 'phase']),
    ('moon_phases', ['phase_emoji', 'phase']),
]

options = PipelineOptions(flags=[], type_check_additional='all')
with beam.Pipeline(options=options) as pipeline:
  (
      pipeline
      | 'Read from SQLite' >> SelectFromSQLite(database_file, queries)
      | 'Print rows' >> beam.Map(print)
  )

# + [markdown] id="C5Mx_pfNpu_q"
# # Writing data
#
# You might want to write your data in various output formats. Take a look at the
# [Built-in I/O Transforms](https://beam.apache.org/documentation/io/built-in)
# page for a list of all the available I/O transforms in Beam.
#
# If none of those work for you, you might need to create your own output transform.
#
# > ℹ️ For a more in-depth guide, take a look at the
# [Developing a new I/O connector](https://beam.apache.org/documentation/io/developing-io-overview) page.

# + [markdown] id="FpM368NEhc-q"
# ## Creating an output transform
#
# The most straightforward way to write data would be to use a `Map` transform to write each element into our desired output format. In most cases, however, this would result in a lot of overhead creating, connecting to, and/or deleting resources.
#
# Instead, most data services are optimized to write _batches_ of elements at a time. Batch writes only connects to the service once, and can load many elements at a time.
#
# Here, we discuss two common ways of batching elements for optimized writes: _fixed-sized batches_, and
# _[windows](https://beam.apache.org/documentation/programming-guide/#windowing)
# of elements_.

# + [markdown] id="5gypFFh4hM48"
# ## Writing fixed-sized batches
#
# If the order of the elements _is not_ important, we can simply create fixed-sized batches and write those independently.
#
# We can use
# [`GroupIntoBatches`](https://beam.apache.org/documentation/transforms/python/aggregation/groupintobatches)
# to get fixed-sized batches. Note that it expects `(key, value)` pairs. Since `GroupIntoBatches` is an _aggregation_, all the elements in a batch _must_ fit into memory for each worker.
#
# > ℹ️ `GroupIntoBatches` requires a `(key, value)` pair.
For simplicity, this example uses a placeholder `None` key and discards it later. Depending on your data, there might be a key that makes more sense. Using a _balanced_ key, where each key contains around the same number of elements, may help parallelize the batching process. # # Let's create something similar to `WriteToText` but keep it simple with a unique identifier in the file name instead of the file count. # # To write a file using the Beam `filesystems` module, we need to use [`create`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.filesystems.html#apache_beam.io.filesystems.FileSystems.create), which writes `bytes` into the file. # # > ℹ️ To read a file instead, use the [`open`](https://beam.apache.org/releases/pydoc/current/apache_beam.io.filesystems.html#apache_beam.io.filesystems.FileSystems.open) # > function instead. # # For the output type hint, we can use [`PDone`](https://beam.apache.org/releases/pydoc/current/_modules/apache_beam/pvalue.html) to indicate this is the last transform in a given pipeline. # + id="LcRHXwyT8Rrj" import apache_beam as beam from apache_beam.io.filesystems import FileSystems as beam_fs from apache_beam.options.pipeline_options import PipelineOptions import os import uuid from typing import Iterable @beam.ptransform_fn @beam.typehints.with_input_types(str) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteBatchesToFiles( pcollection: beam.PCollection[str], file_name_prefix: str, file_name_suffix: str = '.txt', batch_size: int = 100, ) -> beam.pvalue.PDone: def expand_pattern(pattern): for match_result in beam_fs.match([pattern])[0].metadata_list: yield match_result.path def write_file(lines: Iterable[str]): file_name = f"{file_name_prefix}-{uuid.uuid4().hex}{file_name_suffix}" with beam_fs.create(file_name) as f: for line in lines: f.write(f"{line}\n".encode('utf-8')) # Remove existing files matching the output file_name pattern. 
for path in expand_pattern(f"{file_name_prefix}*{file_name_suffix}"): os.remove(path) return ( pcollection # For simplicity we key with `None` and discard it. | 'Key with None' >> beam.WithKeys(lambda _: None) | 'Group into batches' >> beam.GroupIntoBatches(batch_size) | 'Discard key' >> beam.Values() | 'Write file' >> beam.Map(write_file) ) output_file_name_prefix = 'outputs/batch' options = PipelineOptions(flags=[], type_check_additional='all') with beam.Pipeline(options=options) as pipeline: ( pipeline | 'Create file lines' >> beam.Create([ 'Each element must be a string.', 'It writes one element per line.', 'There are no guarantees on the line order.', 'The data might be written into multiple files.', ]) | 'Write batches to files' >> WriteBatchesToFiles( file_name_prefix=output_file_name_prefix, file_name_suffix='.txt', batch_size=3, ) ) # + colab={"base_uri": "https://localhost:8080/"} id="CUklk4JtEbft" outputId="adddbd9f-e66d-4def-ba59-1eafccdbe793" # Lets look at the output files and contents. # !head outputs/batch*.txt # + [markdown] id="hbmPT317hP5K" # ## Writing windows of elements # # If the order of the elements _is_ important, we could batch the elements by windows. This could be useful in _streaming_ pipelines, where we have an indefinite number of incoming elements and we would like to write windows as they are being processed. # # > ℹ️ For more information about windows and triggers, check the [Windowing](https://beam.apache.org/documentation/programming-guide/#windowing) page. # # We use a # [custom `DoFn` transform](https://beam.apache.org/documentation/transforms/python/elementwise/pardo/#example-2-pardo-with-timestamp-and-window-information) # to extract the window start time and end time. # We use this for the file names of the output files. 
# + id="v_qK300FG9js" import apache_beam as beam from apache_beam.io.filesystems import FileSystems as beam_fs from apache_beam.options.pipeline_options import PipelineOptions from datetime import datetime import time from typing import Any, Dict def unix_time(time_str: str) -> int: return time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S')) class WithWindowInfo(beam.DoFn): def process(self, element: Any, window=beam.DoFn.WindowParam) -> Iterable[Dict[str, Any]]: yield { 'element': element, 'window_start': window.start.to_utc_datetime(), 'window_end': window.end.to_utc_datetime(), } @beam.ptransform_fn @beam.typehints.with_input_types(str) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteWindowsToFiles( pcollection: beam.PCollection[str], file_name_prefix: str, file_name_suffix: str = '.txt', ) -> beam.pvalue.PDone: def write_file(batch: Dict[str, Any]): start_date = batch['window_start'].date() start_time = batch['window_start'].time() end_time = batch['window_end'].time() file_name = f"{file_name_prefix}-{start_date}-{start_time}-{end_time}{file_name_suffix}" with beam_fs.create(file_name) as f: for x in batch['element']: f.write(f"{x}\n".encode('utf-8')) return ( pcollection | 'Group all per window' >> beam.GroupBy(lambda _: None) | 'Discard key' >> beam.Values() | 'Get window info' >> beam.ParDo(WithWindowInfo()) | 'Write files' >> beam.Map(write_file) ) output_file_name_prefix = 'outputs/window' window_size_sec = 5 * 60 # 5 minutes options = PipelineOptions(flags=[], type_check_additional='all') with beam.Pipeline(options=options) as pipeline: ( pipeline | 'Create elements' >> beam.Create([ {'timestamp': unix_time('2020-03-19 08:49:00'), 'event': 'login'}, {'timestamp': unix_time('2020-03-19 08:49:20'), 'event': 'view_account'}, {'timestamp': unix_time('2020-03-19 08:50:00'), 'event': 'view_orders'}, {'timestamp': unix_time('2020-03-19 08:51:00'), 'event': 'track_order'}, {'timestamp': unix_time('2020-03-19 09:00:00'), 'event': 'logout'}, ]) 
| 'With timestamps' >> beam.Map( lambda x: beam.window.TimestampedValue(x, x['timestamp'])) | 'Fixed-sized windows' >> beam.WindowInto( beam.window.FixedWindows(window_size_sec)) | 'To string' >> beam.Map( lambda x: f"{datetime.fromtimestamp(x['timestamp'])}: {x['event']}") | 'Write windows to files' >> WriteWindowsToFiles( file_name_prefix=output_file_name_prefix, file_name_suffix='.txt', ) ) # + colab={"base_uri": "https://localhost:8080/"} id="4QXKKVawTJ2_" outputId="96a84b29-3fd2-46f4-b21b-d3f07daa928b" # Lets look at the output files and contents. # !head outputs/window*.txt # + [markdown] id="gnoz_mWtxSjW" # # What's next? # # * [Programming guide](https://beam.apache.org/documentation/programming-guide) -- learn about all the Apache Beam concepts in more depth. # * [Transform catalog](https://beam.apache.org/documentation/transforms/python/overview) -- check out all the available transforms. # * [Mobile gaming example](https://beam.apache.org/get-started/mobile-gaming-example) -- learn more about windowing, triggers, and streaming through a complete example pipeline. # * [Runners](https://beam.apache.org/documentation/runners/capability-matrix) -- check the available runners, their capabilities, and how to run your pipeline in them.
examples/notebooks/tour-of-beam/reading-and-writing-data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # # Gumbel Softmax - Generative modelling with variational Autoencoders # # # * Pre-print, published in ICLR 2017 https://arxiv.org/pdf/1611.01144.pdf # # * https://arxiv.org/pdf/1406.2989.pdf # # # ## Experiments # # * **Dataset**: We use the MNIST dataset with fixed binarization for training and evaluation # * **Tricks**: We also found that variance normalization was necessary # * **Network**: We used sigmoid activation functions for binary (Bernoulli) neural networks and softmax activations for categorical variables. # * **Training**: Models were trained using stochastic gradient descent with momentum 0.9. # * **Learning rates**: are chosen from {3e−5, 1e−5, 3e−4, 1e−4, 3e−3, 1e−3}; we select the best learning rate for each estimator using the MNIST validation set, and report performance on the test set. # * **Tasks** Each estimator is evaluated on two tasks: (1) structured output prediction and (2) variational training of generative models. 
# # # # # Requirements import tensorflow as tf tf.__version__ AUTOTUNE = tf.data.experimental.AUTOTUNE # + import pathlib import os import matplotlib.pyplot as plt import numpy as np import PIL.Image np.set_printoptions(precision=4) # - import pandas as pd # do we need that # # Load and preprocess data import tensorflow_datasets as tfds mnist_data = tfds.load("binarized_mnist", data_dir="/tf/data") mnist_train, mnist_test = mnist_data["train"], mnist_data["test"] assert isinstance(mnist_train, tf.data.Dataset) # ## split upper half / lower half mnist_train.take(1) def split_lower_upper_half(image): print(image["image"]) flat_image = tf.reshape(image["image"], [-1], name=None) upper_half, lower_half = tf.split(flat_image, num_or_size_splits=2, axis=0, num=None, name='split_image_to_upper_lower_half') return upper_half, lower_half#upper_half, lower_half # x, y labeled_train_ds = mnist_train.map(split_lower_upper_half, num_parallel_calls=AUTOTUNE) labeled_train_ds.take(1) labeled_train_ds = labeled_train_ds.shuffle(100,reshuffle_each_iteration=True).batch(100) # # Gumbel Softmax # * https://gist.github.com/ericjang/1001afd374c2c3b7752545ce6d9ed349 # + def sample_gumbel(shape, eps=1e-20): """Sample from Gumbel(0, 1)""" U = tf.random.uniform(shape,minval=0,maxval=1) return -tf.math.log(-tf.math.log(U + eps) + eps) def gumbel_softmax_sample(logits, temperature): """ Draw a sample from the Gumbel-Softmax distribution""" y = logits + sample_gumbel(tf.shape(logits)) return tf.nn.softmax( y / temperature) def gumbel_softmax(logits, temperature, hard=False): """Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs temperature: non-negative scalar hard: if True, take argmax, but differentiate w.r.t. soft sample y Returns: [batch_size, n_class] sample from the Gumbel-Softmax distribution. 
If hard=True, then the returned sample will be one-hot, otherwise it will be a probabilitiy distribution that sums to 1 across classes """ y = gumbel_softmax_sample(logits, temperature) if hard: k = tf.shape(logits)[-1] #y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype) y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype) y = tf.stop_gradient(y_hard - y) + y return y # - # ### 1) Structured output prediction with stochastic binary networks # # * **Task**: Predict lower half of mnist image given top half. # * The minimization objective for this conditional generative model is an importance-sampled estimate of the likelihood objective, Eh∼pθ(hi|xupper) m Pm i=1 log pθ(xlower|hi) # # * where m = 1 is used for training and m = 1000 is used for evaluation. # # * For bernoulli variables they use signmoid activation # * For categorical variables they use # # # + class GumbelSoftmaxStructuredOutputPrediciton(tf.keras.Model): """ Predicts lower half of an mnist image given the top half. """ def __init__(self): super(GumbelSoftmaxStructuredOutputPrediciton, self).__init__() self.setup_model() def setup_model(self): self.input_layer = tf.keras.layers.Dense(200, activation=tf.nn.sigmoid) # [bs,392] => [bs,200] self.categorical_layer = tf.keras.layers.Dense(200, activation=None) # [bs,200] => [bs,200] self.output_layer = tf.keras.layers.Dense(392, activation=tf.nn.sigmoid) # [bs,200] => [bs,392] def call(self, upper_image_half, temperature=0.5): h1 = self.input_layer(upper_image_half) logits = self.categorical_layer(h1) h2 = gumbel_softmax_sample(logits, temperature) lower_image_half = self.output_layer(h2) return lower_image_half sop_model = GumbelSoftmaxStructuredOutputPrediciton() # - optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=False, name='SGD') # m {3e−5, 1e−5, 3e−4, 1e−4, 3e−3, 1e−3}; # # Loss function # # * They use negative log likelihood from a bernoulli distribution where the probability is a sigmoid. 
# # * log likelihodd # # # tf.nn.sigmoid_cross_entropy_with_logits # -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)) # # + loss_object = tf.keras.losses.BinaryCrossentropy( from_logits=True, label_smoothing=0, name='binary_crossentropy' ) loss_object = tf.keras.losses.MeanSquaredError( name='mean_squared_error' ) # - # # Train loss, test loss # + train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5) #test_loss = tf.keras.metrics.BinaryCrossentropy( name='test_lsss', dtype=None, from_logits=False, label_smoothing=0) # - # # Train step @tf.function def train_step(images_upper_half, images_lower_half): #images_lower_half = tf.cast(x=images_lower_half, dtype=tf.float32) with tf.GradientTape() as tape: predicted_lower_half = sop_model(images_upper_half, training=True) #loss = tf.nn.sigmoid_cross_entropy_with_logits( # labels=images_lower_half, # logits=predicted_lower_half, # name=None #) loss = loss_object(images_lower_half, predicted_lower_half) gradients = tape.gradient(loss, sop_model.trainable_variables) optimizer.apply_gradients(zip(gradients, sop_model.trainable_variables)) train_loss(loss) train_accuracy(images_lower_half,predicted_lower_half) # # Plot Images def plotImages(images_arr,num_images=10): num_imgages = images_arr.shape[0] fig, axes = plt.subplots(1, num_images, figsize=(20,20)) axes = axes.flatten() for img, ax in zip( images_arr, axes): ax.imshow(img) ax.axis('off') plt.tight_layout() plt.show() # + images_labels_batch = np.array(list(labeled_train_ds.take(1).as_numpy_iterator())[0]) num_images = 10 images = images_labels_batch[0][0:num_images].reshape([num_images, -1, 28]) labels = images_labels_batch[1][0:num_images].reshape([num_images, -1, 28 ]) # - plotImages(images) plotImages(labels) # # Training # + EPOCHS = 100 for epoch in range(EPOCHS): # Reset the metrics at the start of the next epoch 
train_loss.reset_states() #test_loss.reset_states() # plot a few images every now and then if epoch%10==0: images_labels = np.array(list(labeled_train_ds.take(1).as_numpy_iterator())[0]) images, labels = images_labels predictions = sop_model( images , training=False) num_images = 10 images = images_labels_batch[0][0:num_images].reshape([num_images, -1, 28]) labels = images_labels_batch[1][0:num_images].reshape([num_images, -1, 28 ]) predictions = predictions.numpy()[0:num_images].reshape([num_images, -1, 28 ]) plotImages(images) plotImages(labels) plotImages(predictions) for images, labels in labeled_train_ds: train_step(images, labels) #for test_images, test_labels in test_ds: # test_step(test_images, test_labels) template = 'Epoch {}, Loss: {}, Accuracy: {}' print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100))#, # test_loss.result(), # test_accuracy.result() * 100)) # -
2016/gumbel-softmax/Gumbel Softmax - Auto encoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 64-bit # metadata: # interpreter: # hash: aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49 # name: python3 # --- # # Lab Three # --- # # For this lab we're going to be making and using a bunch of functions. # # Our Goals are: # - Searching our Documentation # - Using built in functions # - Making our own functions # - Combining functions # - Structuring solutions # + # For the following built in functions we didn't touch on them in class. I want you to look for them in the python documentation and implement them. # + # I want you to find a built in function to SWAP CASE on a string. Print it. print("KaCeY".swapcase()) # For example the string "HeY thERe HowS iT GoING" turns into "hEy THerE hOWs It gOing" sample_string = "HeY thERe HowS iT GoING" print(sample_string.swapcase()) # + # I want you to find a built in function to CENTER a string and pad the sides with 4 dashes(-) a side. Print it. # For example the string "Hey There" becomes "----Hey There----" sample_string = "Hey There" print(sample_string.center(17, '-')) # + # I want you to find a built in function to PARTITION a string. Print it. # For example the string "abcdefg.hijklmnop" would come out to be ["abcdefg",".","hijklmnop"] sample_string = "abcdefg.hijklmnop" print(sample_string.partition(".")) # + # I want you to write a function that will take in a number and raise it to the power given. # For example if given the numbers 2 and 3. The math that the function should do is 2^3 and should print out or return 8. Print the output. import math def num_raise(num1, num2): print(math.pow(num1,num2)) num_raise(2,3) # + # I want you to write a function that will take in a list and see how many times a given number is in the list. 
# For example if the array given is [2,3,5,2,3,6,7,8,2] and the number given is 2 the function should print out or return 3. Print the output.
def num_finder(num, my_list):
    """Return how many times `num` occurs in `my_list`."""
    tot = 0
    for found in my_list:
        if found == num:
            tot += 1
    return tot

if __name__ == "__main__":
    answer = num_finder(2, [2,3,5,2,3,6,7,8,2])
    print(answer)

# +
# Use the functions given to create a slope function. The function should be named slope and have 4 parameters.
# If you don't remember the slope formula is (y2 - y1) / (x2 - x1) If this doesn't make sense look up `Slope Formula` on google.

def division(x, y):
    """Return x divided by y."""
    return x / y

def subtraction(x, y):
    """Return x minus y."""
    return x - y

def slope(x1, x2, y1, y2):
    """Print and return the slope (y2 - y1) / (x2 - x1) of the line through two points."""
    rise = subtraction(y2, y1)
    run = subtraction(x2, x1)
    result = division(rise, run)
    print(result)
    return result

slope(2, 4, 3, 6)

# +
# Use the functions given to create a distance function. The function should be named function and have 4 parameters.
# HINT: You'll need a built in function here too. You'll also be able to use functions written earlier in the notebook as long as you've run those cells.
# If you don't remember the distance formula it is the square root of the following ((x2 - x1)^2 + (y2 - y1)^2). If this doesn't make sense look up `Distance Formula` on google.
import math

def addition(x, y):
    """Return x plus y."""
    return x + y

def subtraction(x, y):
    """Return x minus y."""
    return x - y

def num_raise(x, y):
    """Return x raised to the power y."""
    return math.pow(x, y)

def function(x1, x2, y1, y2):
    """Print and return the Euclidean distance between (x1, y1) and (x2, y2)."""
    dx_squared = num_raise(subtraction(x2, x1), 2)
    dy_squared = num_raise(subtraction(y2, y1), 2)
    # BUG FIX: the original printed the *squared* distance; the distance
    # formula requires the square root of the sum of squares.
    answer = math.sqrt(addition(dx_squared, dy_squared))
    print(answer)
    return answer

function(3, 7, 9, 12)
# -
JupyterNotebooks/Labs/Lab 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import dask.dataframe as dd import geopandas as gpd import folium # leaflet.js py map from folium import plugins import numpy as np import pandas as pd import os print('Required Python libraries:') print('Pandas:', pd.__version__) print('GeoPandas:', gpd.__version__) print('Folium:', folium.__version__) # + # %%time # set data file path parquet_data_folder = '../data/crimes-2017.snappy.parq' print('Loading crime data from: {}'.format(parquet_data_folder)) # load crimes parquet data into dask df crimes = dd.read_parquet(parquet_data_folder, index='Date') # load all data into memory crimes = crimes.persist() print('Crime data loaded into memory.') # log records count and data frame stats print('Crime data stats:') print('---------------------------------------') print('{:,} total records in {} partitions'.format(len(crimes), crimes.npartitions)) print('DataFrame size: {:,}'.format(crimes.size.compute())) # + # get crime geo data for mapping homicides crime_geo = crimes[['PrimaryType', 'Block', 'Description', 'LocationDescription', 'CommunityArea', 'Arrest', 'Domestic', 'Latitude', 'Longitude']].dropna() # get homicides homicides = crime_geo[(crime_geo['PrimaryType']=='HOMICIDE')].compute() print('2017 Chicago homicides data preview:') print('--------------------------------------------------------------------------') print(homicides.head()) print('...') print('Total 2017 homicides:', len(homicides)) # - # get homicides coordinates for the folium heatmap data homicides_geo = homicides[['Latitude', 'Longitude']].values.tolist() # to_records() print(homicides_geo[0:5]) print(homicides.index) # homicides data preview for homicide in homicides[0:3].iterrows(): print(homicide) # + # Chicago center coordinates CHICAGO_COORDINATES = (41.85, -87.68) # 
leaflet.js map attributions map_attributions = ('&copy; <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> ' 'contributors, &copy; <a href="http://cartodb.com/attributions">CartoDB</a>') # create Chicago homicides map homicides_map = folium.Map(location=CHICAGO_COORDINATES, attr=map_attributions, tiles='Cartodb Positron', #'OpenStreetMap', zoom_start=10, min_zoom=10, control_scale=True) # create homicides heatmap homicides_heatmap = plugins.HeatMap(homicides_geo, radius=8, blur=5, name='2017 Chicago Homicides Heat Map') # add homicides heatamap to leaflet map display homicides_heatmap.add_to(homicides_map) # create marker popups #popups = ['lon:{}<br>lat:{}'.format(lon, lat) for (lat, lon) in homicides_geo] # create homicides marker cluster #plugins.MarkerCluster(locations=homicides_geo, popups=popups, # name='2017 Chicago Homicides').add_to(homicides_map) # create marker callback JS function marker_callback = """\ function (row) { var icon, marker; icon = L.AwesomeMarkers.icon({ icon: "map-marker", markerColor: "red"}); marker = L.marker(new L.LatLng(row[0], row[1])).bindPopup('Test'); marker.setIcon(icon); return marker; }; """ # create faster marker cluster layer with awesome markers plugins.FastMarkerCluster(data=homicides_geo, callback=marker_callback).add_to(homicides_map) # add time lapse? # add fullscreen toggle plugins.Fullscreen( position='topright', title='full screen', title_cancel='exit full screen', force_separate_button=True).add_to(homicides_map) # add layer control for heatmap/markers display toggle folium.LayerControl().add_to(homicides_map) # save map for demo homicides_map.save(os.path.join('../maps/', 'chicago-homicides-2017-map.html')) # show homicides map homicides_map # -
notebooks/chicago-homicides-2017-folium-map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: CMB 20200115 # language: python # name: cmbenv-20200115 # --- # # Binning a ground schedule # # In this notebook, we take an observing schedule and use `toast_ground_sim.py` to translate it into a depth map # + # Are you using a special reservation for a workshop? # If so, set it here: nersc_reservation = None # Load common tools for all lessons import sys sys.path.insert(0, "..") from lesson_tools import ( check_nersc, fake_focalplane ) nersc_host, nersc_repo, nersc_resv = check_nersc(reservation=nersc_reservation) # Capture C++ output in the jupyter cells # %reload_ext wurlitzer # - # First, we need a focalplane. If one does not already exist, TOAST `pipelines` includes a tool for generating mock hexagonal focalplanes: # ! toast_fake_focalplane.py --help # Here we create a focalplane with 10-degree FOV and a mininimum of 20 pixels: # ! toast_fake_focalplane.py \ # --minpix 20 \ # --out focalplane \ # --fwhm 30 \ # --fov 10 \ # --psd_fknee 5e-2 \ # --psd_NET 1e-3 \ # --psd_alpha 1 \ # --psd_fmin 1e-5 # The actual focalplane ends up having 37 pixels, instead of the minimum of 20. This is because regular packing of the hexagon is quantized. Notice that the final name of the focalplane is `focalplane_37.pkl`. We'll need the name to run the simulation script. We also create the schedule in case it does not yet exist from the other notebook: # ! toast_ground_schedule.py \ # --site-lat "-22.958064" \ # --site-lon "-67.786222" \ # --site-alt 5200 \ # --site-name Atacama \ # --telescope LAT \ # --start "2020-01-01 00:00:00" \ # --stop "2020-01-01 12:00:00" \ # --patch-coord C \ # --patch small_patch,1,40,-40,44,-44 \ # --out schedule.txt # We will use the versatile ground simulation pipeline, `toast_ground_sim.py`, to bin the map. 
It will be covered in detail in lesson 7 so here we simply write out a parameter file: # %%writefile bin_schedule.par --sample-rate 10.0 --scan-rate 0.3 --scan-accel 10.0 --nside 64 --focalplane focalplane_37.pkl --schedule schedule.txt --out out --simulate-noise --freq 100 --no-destripe --no-binmap --hits --wcov # Now we run the pipeline in parallel on a compute node: # + import subprocess as sp command = "toast_ground_sim.py @bin_schedule.par" runstr = None if nersc_host is not None: runstr = "srun -N 1 -C haswell -n 32 -c 2 --cpu_bind=cores -t 00:05:00" if nersc_resv is not None: runstr = "{} --reservation {}".format(runstr, nersc_resv) else: # Just use mpirun runstr = "mpirun -np 4" runcom = "{} {}".format(runstr, command) print(runcom, flush=True) sp.check_call(runcom, stderr=sp.STDOUT, shell=True) # - # Let's examine the resulting hits and depth map # + import matplotlib.pyplot as plt # %matplotlib inline import healpy hits = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_hmap.fits") hits[hits == 0] = healpy.UNSEEN healpy.mollview(hits, unit="hits", title="Total hits") # healpy.graticule(22.5, verbose=False) # - wcov = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_wcov.fits") wcov *= 1e12 # from K^2 to uK^2 wcov[wcov == 0] = healpy.UNSEEN healpy.mollview(wcov, unit="$\mu$K$^2$", title="White noise variance", min=1e0, max=1e3) # healpy.graticule(22.5, verbose=False)
lessons/02_Simulated_Scan_Strategies/bin_ground_schedule.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Caso: Análisis de los Logs de Navegación de la web de un Banco # Utilizamos una muestra del Log de Navegación del sitio web de un Banco # ##### Clientes analizados: 6,576 # ##### Horizonte de tiempo: Del 12-Mar-2018 al 21-May-2018 (Poco menos de 3 meses) # + ##pip install holoviews # + ##pip install plotly # + tags=[] import pandas as pd import numpy as np import plotly.graph_objects as go import plotly.express as pex import holoviews as hv hv.extension('bokeh') # - h = pd.read_csv('../data/listanav.csv') h.head() # ## Visualización utilizando Holoviews # + # %%opts Sankey (edge_color='inicio' edge_line_width=2 node_cmap='tab20') # %%opts Sankey (node_alpha=1.0 edge_hover_fill_color='red') # %%opts Sankey [node_sort=False label_position='right' node_width=30 node_sort=True ] # %%opts Sankey [margin=0 padding=0 bgcolor='white'] hv.Sankey(h, kdims=['inicio', 'fin'], vdims=['customer']) # - # ## Visualización utilizando Plotly # + all_nodes = h.inicio.values.tolist() + h.fin.values.tolist() source_indices = [all_nodes.index(country) for country in h.inicio] target_indices = [all_nodes.index(measure) for measure in h.fin] colors = pex.colors.qualitative.D3 node_colors = [np.random.choice(colors) for node in all_nodes] fig = go.Figure(data=[go.Sankey( node = dict( pad = 20, thickness = 20, line = dict(color = 'black', width = 1.0), label = all_nodes, color = node_colors, ), link = dict( source = source_indices, target = target_indices, value = h.customer, ))]) fig.update_layout(title_text='Patrones de Navegación en el Banco', height=600, font=dict(size = 10, color = 'black'), paper_bgcolor='white') fig.show() # - # Elaborado por <NAME> bajo licencia MIT (2022)
notebooks/Lab_16_Sankey_Web.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # #The Efficient Frontier: Markowitz Portfolio optimization in Python # Authors: Dr. <NAME>, <NAME>, Dr. <NAME> # # Notebook released under the Creative Commons Attribution 4.0 License. # # --- # ## Introduction # In this blog post you will learn about the basic idea behind Markowitz portfolio optimization as well as how to do it in Python. We will then show how you can create a simple backtest that rebalances its portfolio in a Markowitz-optimal way. We hope you enjoy it and get a little more enlightened in the process. # # We will start by using random data and only later use actual stock data. This will hopefully help you to get a sense of how to use modelling and simulation to improve your understanding of the theoretical concepts. Don‘t forget that the skill of an algo-trader is to put mathematical models into code and this example is great practice. # # Let's start with importing a few modules, which we need later and produce a series of normally distributed returns. `cvxopt` is a convex solver which we will use for the optimization of the portfolio. # # ## Simulations # + import numpy as np import matplotlib.pyplot as plt import cvxopt as opt from cvxopt import blas, solvers import pandas as pd np.random.seed(123) # Turn off progress printing solvers.options['show_progress'] = False # - # Assume that we have 4 assets, each with a return series of length 1000. We can use `numpy.random.randn` to sample returns from a normal distribution. 
# + ## NUMBER OF ASSETS n_assets = 4 ## NUMBER OF OBSERVATIONS n_obs = 1000 return_vec = np.random.randn(n_assets, n_obs) # - plt.plot(return_vec.T, alpha=.4); plt.xlabel('time') plt.ylabel('returns') # These return series can be used to create a wide range of portfolios, which all # have different returns and risks (standard deviation). We can produce a wide range # of random weight vectors and plot those portfolios. As we want all our capital to be invested, this vector will have to some to one. # + def rand_weights(n): ''' Produces n random weights that sum to 1 ''' k = np.random.rand(n) return k / sum(k) print rand_weights(n_assets) print rand_weights(n_assets) # - # Next, lets evaluate how many of these random portfolios would perform. Towards this goal we are calculating the mean returns as well as the volatility (here we are using standard deviation). You can also see that there is # a filter that only allows to plot portfolios with a standard deviation of < 2 for better illustration. def random_portfolio(returns): ''' Returns the mean and standard deviation of returns for a random portfolio ''' p = np.asmatrix(np.mean(returns, axis=1)) w = np.asmatrix(rand_weights(returns.shape[0])) C = np.asmatrix(np.cov(returns)) mu = w * p.T sigma = np.sqrt(w * C * w.T) # This recursion reduces outliers to keep plots pretty if sigma > 2: return random_portfolio(returns) return mu, sigma # In the code you will notice the calculation of the return with: # # $$ R = p^T w $$ # # where $R$ is the expected return, $p^T$ is the transpose of the vector for the mean # returns for each time series and w is the weight vector of the portfolio. $p$ is a Nx1 # column vector, so $p^T$ turns into a 1xN row vector which can be multiplied with the # Nx1 weight (column) vector w to give a scalar result. This is equivalent to the dot # product used in the code. 
Keep in mind that `Python` has a reversed definition of # rows and columns and the accurate `NumPy` version of the previous equation would # be `R = w * p.T` # # Next, we calculate the standard deviation with # # $$\sigma = \sqrt{w^T C w}$$ # # where $C$ is the covariance matrix of the returns which is a NxN matrix. Please # note that if we simply calculated the simple standard deviation with the appropriate weighting using `std(array(ret_vec).T*w)` we would get a slightly different # ’bullet’. This is because the simple standard deviation calculation would not take # covariances into account. In the covariance matrix, the values of the diagonal # represent the simple variances of each asset while the off-diagonals are the variances between the assets. By using ordinary `std()` we effectively only regard the # diagonal and miss the rest. A small but significant difference. # # Lets generate the mean returns and volatility for 500 random portfolios: n_portfolios = 500 means, stds = np.column_stack([ random_portfolio(return_vec) for _ in xrange(n_portfolios) ]) # Upon plotting those you will observe that they form a characteristic parabolic # shape called the ‘Markowitz bullet‘ with the boundaries being called the ‘efficient # frontier‘, where we have the lowest variance for a given expected. plt.plot(stds, means, 'o', markersize=5) plt.xlabel('std') plt.ylabel('mean') plt.title('Mean and standard deviation of returns of randomly generated portfolios'); # ## Markowitz optimization and the Efficient Frontier # # Once we have a good representation of our portfolios as the blue dots show we can calculate the efficient frontier Markowitz-style. This is done by minimising # # $$ w^T C w$$ # # for $w$ on the expected portfolio return $R^T w$ whilst keeping the sum of all the # weights equal to 1: # # $$ \sum_{i}{w_i} = 1 $$ # Here we parametrically run through $R^T w = \mu$ and find the minimum variance # for different $\mu$‘s. 
# This can be done with `scipy.optimise.minimize` but we have
# to define quite a complex problem with bounds, constraints and a Lagrange multiplier. Conveniently, the `cvxopt` package, a convex solver, does all of that for us. We used one of their [examples]() with some modifications as shown below. You will notice that there are some conditioning expressions in the code. They are simply needed to set up the problem. For more information please have a look at the `cvxopt` example.
#
# The `mus` vector produces a series of expected return values $\mu$ in a non-linear and more appropriate way. We will see later that we don‘t need to calculate a lot of these as they perfectly fit a parabola, which can safely be extrapolated for higher values.

# +
def optimal_portfolio(returns):
    # Trace the efficient frontier for the given n_assets x n_obs return
    # series and return (optimal weights, frontier returns, frontier risks).
    n = len(returns)
    returns = np.asmatrix(returns)

    # Log-spaced sweep of risk-aversion parameters (10^-1 up to ~10^4).
    N = 100
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]

    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))
    pbar = opt.matrix(np.mean(returns, axis=1))

    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n ,1))  # G*w <= h  ->  weights are non-negative
    A = opt.matrix(1.0, (1, n))  # A*w == b  ->  weights sum to one
    b = opt.matrix(1.0)

    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x']
                  for mu in mus]
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
    ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])
    # CALCULATE THE OPTIMAL PORTFOLIO
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks

weights, returns, risks = optimal_portfolio(return_vec)

plt.plot(stds, means, 'o')
plt.ylabel('mean')
plt.xlabel('std')
plt.plot(risks, returns, 'y-o');
# -

# In yellow you can see the optimal portfolios for each of the desired returns (i.e. the `mus`).
# In addition, we get the one optimal portfolio returned:

print(weights)

# ## Backtesting on real market data
# This is all very interesting but not very applied. We next demonstrate how you can create a simple algorithm in [`zipline`](http://github.com/quantopian/zipline) -- the open-source backtester that powers [Quantopian](https://www.quantopian.com) -- to test this optimization on actual historical stock data.
#
# First, lets load in some historical data using [Quantopian](https://www.quantopian.com)'s `get_pricing()`.

data = get_pricing(['IBM', 'GLD', 'XOM', 'AAPL', 'MSFT', 'TLT', 'SHY'],
                   start_date='2005-06-07', end_date='2014-01-27')
data.loc['price', :, :].plot(figsize=(8,5))
plt.ylabel('price in $');

# Next, we'll create a `zipline` algorithm by defining two functions -- `initialize()` which is called once before the simulation starts, and `handle_data()` which is called for every trading bar. We then instantiate the algorithm object.
#
# If you are confused about the syntax of `zipline`, check out the [tutorial](http://nbviewer.ipython.org/github/quantopian/zipline/blob/master/docs/tutorial.ipynb).

# +
import zipline
from zipline.api import (add_history,
                         history,
                         set_slippage,
                         slippage,
                         set_commission,
                         commission,
                         order_target_percent)
from zipline import TradingAlgorithm


def initialize(context):
    '''
    Called once at the very beginning of a backtest (and live trading).
    Use this method to set up any bookkeeping variables.

    The context object is passed to all the other methods in your algorithm.

    Parameters

    context: An initialized and empty Python dictionary that has been
             augmented so that properties can be accessed using dot
             notation as well as the traditional bracket notation.

    Returns None
    '''
    # Register history container to keep a window of the last 100 prices.
    add_history(100, '1d', 'price')
    # Turn off the slippage model
    set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers Commission)
    set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    # Bar counter; trading only starts once 100 days of history accrued.
    context.tick = 0


def handle_data(context, data):
    '''
    Called when a market event occurs for any of the algorithm's
    securities.

    Parameters

    data: A dictionary keyed by security id containing the current
          state of the securities in the algo's universe.

    context: The same context object from the initialize function.
             Stores the up to date portfolio as well as any state
             variables defined.

    Returns None
    '''
    # Allow history to accumulate 100 days of prices before trading
    # and rebalance every day thereafter.
    context.tick += 1
    if context.tick < 100:
        return
    # Get rolling window of past prices and compute returns
    prices = history(100, '1d', 'price').dropna()
    returns = prices.pct_change().dropna()
    try:
        # Perform Markowitz-style portfolio optimization
        weights, _, _ = optimal_portfolio(returns.T)
        # Rebalance portfolio accordingly
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError as e:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        # Deliberate best-effort: skip this rebalance, keep prior weights.
        pass

# Instantiate algorithm
algo = TradingAlgorithm(initialize=initialize,
                        handle_data=handle_data)

# Run algorithm
# NOTE(review): swapaxes(2, 0, 1) reads as (axis1=2, axis2=0, copy=1) on a
# pandas Panel -- confirm this orientation is what algo.run expects.
results = algo.run(data.swapaxes(2, 0, 1))
results.portfolio_value.plot()
# -

# As you can see, the performance here is quite good, even through the 2008 financial crisis. This is most likely due to our universe selection and shouldn't always be expected. Increasing the number of stocks in the universe might reduce the volatility as well. Please let us know in the comments section if you had any success with this strategy and how many stocks you used.

# ## Conclusions
#
# In this blog, co-written by Quantopian friend [Dr.
<NAME>](http://drtomstarke.com/), we wanted to provide an intuitive and gentle introduction to Markowitz portfolio optimization which still remains relevant today. By using simulation of various random portfolios we have seen that certain portfolios perform better than others. Convex optimization using `cvxopt` allowed us to then numerically determine the portfolios that live on the *efficient frontier*. The zipline backtest serves as an example but also shows compelling performance. # # ## Next steps # # * Clone this notebook in the [Quantopian Research Platform](http://blog.quantopian.com/quantopian-research-your-backtesting-data-meets-ipython-notebook/) and run it on your own to see if you can enhance the performance. # * You can also download just the notebook for use in your own environment [here](). # * In a future blog post we will outline the connections to Kelly optimization which also tells us the amount of leverage to use. # * We are currently in the process of adding `cvxopt` to the Quantopian backtester -- stay tuned!
research/Markowitz-Quantopian-Research.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simple Linear Regression. Minimal example # # Using the same code as before, please solve the following exercises # 6. Create a function f(x,z) = 13*xs + 7*zs - 12. Does the algorithm work in the same way? # # # Useful tip: When you change something, don't forget to RERUN all cells. This can be done easily by clicking: # Kernel -> Restart & Run All # If you don't do that, your algorithm will keep the OLD values of all parameters. # # ## Solution # # Find the line of code, where we declare the targets, and change it from: # # targets = 2*xs - 3*zs + 5 + noise # # to # # targets = 13*xs + 7*zs - 12 + noise # # Some takeaways: # # 1. The solution is almost found, judging by the values of the loss and those of the weights and biases. # 2. I would say this algorithm is AS effective as that in the lecture. Still needs more iterations. # 3. All else equal, the values of the loss are higher, as the values of the targets are higher. # 4. The same algorithm and methodology solved a completely different problem. That's machine learning for you. # # See you in the next section! # ### Import the relevant libraries # + # We must always import the relevant libraries for our problem at hand. NumPy is a must for this example. import numpy as np # matplotlib and mpl_toolkits are not necessary. We employ them for the sole purpose of visualizing the results. import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # - # ### Generate random input data to train on # + # First, we should declare a variable containing the size of the training set we want to generate. observations = 1000000 # We will work with two variables as inputs. You can think about them as x1 and x2 in our previous examples. 
# We have picked x and z, since it is easier to differentiate them. # We generate them randomly, drawing from an uniform distribution. There are 3 arguments of this method (low, high, size). # The size of xs and zs is observations by 1. In this case: 1000 x 1. xs = np.random.uniform(low=-10, high=10, size=(observations,1)) zs = np.random.uniform(-10, 10, (observations,1)) # Combine the two dimensions of the input into one input matrix. # This is the X matrix from the linear model y = x*w + b. # column_stack is a Numpy method, which combines two vectors into a matrix. Alternatives are stack, dstack, hstack, etc. inputs = np.column_stack((xs,zs)) # Check if the dimensions of the inputs are the same as the ones we defined in the linear model lectures. # They should be n x k, where n is the number of observations, and k is the number of variables, so 1000 x 2. print (inputs.shape) # - # ### Generate the targets we will aim at # + # We want to "make up" a function, use the ML methodology, and see if the algorithm has learned it. # We add a small random noise to the function i.e. f(x,z) = 2x - 3z + 5 + <small noise> noise = np.random.uniform(-1, 1, (observations,1)) # Produce the targets according to the f(x,z) = 2x - 3z + 5 + noise definition. # In this way, we are basically saying: the weights should be 2 and -3, while the bias is 5. targets = 13*xs + 7*zs - 12 + noise # Check the shape of the targets just in case. It should be n x m, where m is the number of output variables, so 1000 x 1. print (targets.shape) # - # ### Plot the training data # The point is to see that there is a strong trend that our model should learn to reproduce. # + # In order to use the 3D plot, the objects should have a certain shape, so we reshape the targets. # The proper method to use is reshape and takes as arguments the dimensions in which we want to fit the object. 
targets = targets.reshape(observations,) # Plotting according to the conventional matplotlib.pyplot syntax # Declare the figure fig = plt.figure() # A method allowing us to create the 3D plot ax = fig.add_subplot(111, projection='3d') # Choose the axes. ax.plot(xs, zs, targets) # Set labels ax.set_xlabel('xs') ax.set_ylabel('zs') ax.set_zlabel('Targets') # You can fiddle with the azim parameter to plot the data from different angles. Just change the value of azim=100 # to azim = 0 ; azim = 200, or whatever. Check and see what happens. ax.view_init(azim=100) # So far we were just describing the plot. This method actually shows the plot. plt.show() # We reshape the targets back to the shape that they were in before plotting. # This reshaping is a side-effect of the 3D plot. Sorry for that. targets = targets.reshape(observations,1) # - # ### Initialize variables # + # We will initialize the weights and biases randomly in some small initial range. # init_range is the variable that will measure that. # You can play around with the initial range, but we don't really encourage you to do so. # High initial ranges may prevent the machine learning algorithm from learning. init_range = 0.1 # Weights are of size k x m, where k is the number of input variables and m is the number of output variables # In our case, the weights matrix is 2x1 since there are 2 inputs (x and z) and one output (y) weights = np.random.uniform(low=-init_range, high=init_range, size=(2, 1)) # Biases are of size 1 since there is only 1 output. The bias is a scalar. biases = np.random.uniform(low=-init_range, high=init_range, size=1) #Print the weights to get a sense of how they were initialized. print (weights) print (biases) # - # ### Set a learning rate # Set some small learning rate (denoted eta in the lecture). # 0.02 is going to work quite well for our example. Once again, you can play around with it. # It is HIGHLY recommended that you play around with it. 
learning_rate = 0.02 # ### Train the model # We iterate over our training dataset 100 times. That works well with a learning rate of 0.02. # The proper number of iterations is something we will talk about later on, but generally # a lower learning rate would need more iterations, while a higher learning rate would need less iterations # keep in mind that a high learning rate may cause the loss to diverge to infinity, instead of converge to 0. for i in range (100): # This is the linear model: y = xw + b equation outputs = np.dot(inputs,weights) + biases # The deltas are the differences between the outputs and the targets # Note that deltas here is a vector 1000 x 1 deltas = outputs - targets # We are considering the L2-norm loss, but divided by 2, so it is consistent with the lectures. # Moreover, we further divide it by the number of observations. # This is simple rescaling by a constant. We explained that this doesn't change the optimization logic, # as any function holding the basic property of being lower for better results, and higher for worse results # can be a loss function. loss = np.sum(deltas ** 2) / 2 / observations # We print the loss function value at each step so we can observe whether it is decreasing as desired. print (loss) # Another small trick is to scale the deltas the same way as the loss function # In this way our learning rate is independent of the number of samples (observations). # Again, this doesn't change anything in principle, it simply makes it easier to pick a single learning rate # that can remain the same if we change the number of training samples (observations). # You can try solving the problem without rescaling to see how that works for you. deltas_scaled = deltas / observations # Finally, we must apply the gradient descent update rules from the relevant lecture. 
# The weights are 2x1, learning rate is 1x1 (scalar), inputs are 1000x2, and deltas_scaled are 1000x1 # We must transpose the inputs so that we get an allowed operation. weights = weights - learning_rate * np.dot(inputs.T,deltas_scaled) biases = biases - learning_rate * np.sum(deltas_scaled) # The weights are updated in a linear algebraic way (a matrix minus another matrix) # The biases, however, are just a single number here, so we must transform the deltas into a scalar. # The two lines are both consistent with the gradient descent methodology. # ### Print weights and biases and see if we have worked correctly. # + # We print the weights and the biases, so we can see if they have converged to what we wanted. # When declared the targets, following the f(x,z), we knew the weights should be 2 and -3, while the bias: 5. print (weights, biases) # Note that they may be convergING. So more iterations are needed. # - # ### Plot last outputs vs targets # Since they are the last ones at the end of the training, they represent the final model accuracy. <br/> # The closer this plot is to a 45 degree line, the closer target and output values are. # We print the outputs and the targets in order to see if they have a linear relationship. # Again, that's not needed. Moreover, in later lectures, that would not even be possible. plt.plot(outputs,targets) plt.xlabel('outputs') plt.ylabel('targets') plt.show()
21_ML_GradualDescent_Python_S43_L301/Minimal_example_Exercise_6_Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Grid Generation with Interactive Widgets
#
#
# This notebook demostrates how to use the interative widgets.
#
# See a version of it in action:

from IPython.display import Audio,Image, YouTubeVideo
YouTubeVideo('S5SG9km2f_A', height=450, width=900)

# ## Main Tutorial

# +
# %matplotlib inline

import warnings
warnings.simplefilter('ignore')

import numpy as np
import matplotlib.pyplot as plt
import pandas
import geopandas

from pygridgen import Gridgen
from pygridtools import viz, iotools


def plotter(x, y, **kwargs):
    # Shared plotting callback for the interactive widgets: draws the model
    # domain (read from the module-level `domain` loaded in the next cell)
    # and overlays the grid cells given by x/y. `figsize` is intercepted for
    # the figure; all other kwargs are forwarded to viz.plot_cells.
    figsize = kwargs.pop('figsize', (9, 9))
    fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')
    viz.plot_domain(domain, betacol='beta', ax=ax)
    ax.set_xlim([0, 25])
    ax.set_ylim([0, 25])
    return viz.plot_cells(x, y, ax=ax, **kwargs)
# -

# ## Loading and plotting the boundary data

# +
domain = geopandas.read_file('basic_data/domain.geojson')

fig, ax = plt.subplots(figsize=(9, 9), subplot_kw={'aspect':'equal'})
fig = viz.plot_domain(domain, betacol='beta', ax=ax)
# -

# ## Generating a grid with `pygridgen`, plotting with `pygridtools`

# +
grid = Gridgen(domain.geometry.x, domain.geometry.y, domain.beta,
               shape=(50, 50), ul_idx=2)
fig_orig, artists = plotter(grid.x, grid.y)
# -

# ### Interactively manipulate the `Focus`

focus, focuser_widget = iotools.interactive_grid_focus(grid, n_points=3, plotfxn=plotter)

focuser_widget

# ### Interactively change the number of nodes in the grid
#
# (Notice how the focus stay where we want)

reshaped, shaper_widget = iotools.interactive_grid_shape(grid, max_n=100, plotfxn=plotter)

shaper_widget

fig_orig

# ### Save, load, and recreate the altered grid without widgets

# +
import json
from pathlib import Path
from tempfile import TemporaryDirectory

# Round-trip the grid through its JSON spec to show that the interactive
# edits survive serialisation.
with TemporaryDirectory() as td:
    f = Path(td, 'widget_grid.json')
    with f.open('w') as grid_write:
        json.dump(grid.to_spec(), grid_write)

    with f.open('r') as grid_read:
        spec = json.load(grid_read)

    new_grid = Gridgen.from_spec(spec)
    plotter(new_grid.x, new_grid.y)
docs/tutorial/04_InteractiveWidgets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/limontec/WhatsApp-GDrive-Downloader/blob/master/WGDD.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="hVKrdyYCN1BD"
# #**WhatsApp GDrive Downloader**
#
# Inspiration: https://github.com/B16f00t/whapa
#
# Adaptation by https://t.me/limontec
#
# DISCLAIMER: TO BE USED FOR EDUCATIONAL PURPOSES ONLY.
#
# Don't type your password... go to the URL: https://myaccount.google.com/apppasswords
#
# Then select Application: Other.
#
# Write down: Whapa
#
# A password will be display.
#
# Then you must write the password bellow.
#
# v0.0.9

# + id="qPKVUTktNk3p"
import os
from google.colab import files
from IPython.display import clear_output

#@markdown <center><h2>Download your WhatsApp data from Google Drive</h2></center><br>

# Clone and set up the whapa toolkit only on the first run of the VM.
if not os.path.exists(f'whapa/libs/whagodri.py'):
    # !git clone https://github.com/B16f00t/whapa
    # !cd whapa && pip install --upgrade -r ./doc/requirements.txt
    # !pip install --upgrade gpsoauth==0.4.2

#@markdown ####Type your info:
email = "<EMAIL>" # @param {type:"string"}
password = "<PASSWORD>" # @param {type:"string"}
phone = "your_phone_number_with_country_code" # @param {type:"string"}

# Write the credentials into whapa's settings file (single-line write; the
# config text itself must stay byte-exact for whapa to parse it).
with open('whapa/cfg/settings.cfg', 'w') as cfg:
    cfg.write('[report]\ncompany =\nrecord =\nunit =\nexaminer =\nnotes =\n\n[google-auth]\ngmail = '+email+'\n# Optional. The account password or app password when using 2FA.\npassword = '+password+'\n# Optional. The result of "adb shell settings get secure android_id".\nandroid_id = 0000000000000000\n# Optional. Enter the backup country code + phonenumber be synchronized, otherwise it synchronizes all backups.\n# You can specify a list of celnumbr = BackupNumber1, BackupNumber2, ...\ncelnumbr = '+phone+'\n\n[icloud-auth]\nicloud = <EMAIL>\npassw = <PASSWORD>')

#@markdown Please, try run the script...
#@markdown <br/>If shows info about your WhatsApp backup...
#@markdown <br/>check the box below to download all content and run again!
download_all = False #@param {type:"boolean"}

# First pass: only list the available backups (-i). The actual sync (-s)
# and download run when download_all is ticked.
if not (download_all):
    clear_output()
    # !python3 whapa/libs/whagodri.py -i

wppbk = 'whatsapp_backup_'+phone

def downloadFile():
    # Zip the synchronized backup directories and push the archive to the
    # browser as a file download.
    try:
        # !cd /content{phone}/files/ && zip -9 -r {wppbk}.zip Backups Databases Media _INTERNAL_FILES_
        clear_output()
        # !mv /content{phone}/files/{wppbk}.zip /content/
        files.download(wppbk+'.zip')
        # !echo "If download does not start, download the file manually from the directory in the left corner. /content/whatsapp_backup_{phone}.zip"
    except FileNotFoundError:
        print("File not found!")
    except:
        # NOTE(review): bare except keeps the notebook alive on any failure,
        # but it also hides unexpected errors.
        print("Something else went wrong, try download manually")

if (download_all):
    # !python3 whapa/libs/whagodri.py -s
    downloadFile()
WGDD.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .java
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Java
//     language: java
//     name: java
// ---

// # Inference with your model
//
// This is the third and final tutorial of our [beginner tutorial series](https://github.com/deepjavalibrary/djl/tree/master/jupyter/tutorial) that will take you through creating, training, and running inference on a neural network. In this tutorial, you will learn how to execute your image classification model for a production system.
//
// In the [previous tutorial](02_train_your_first_model.ipynb), you successfully trained your model. Now, we will learn how to implement a `Translator` to convert between POJO and `NDArray` as well as a `Predictor` to run inference.
//
//
// ## Preparation
//
// This tutorial requires the installation of the Java Jupyter Kernel. To install the kernel, see the [Jupyter README](https://github.com/deepjavalibrary/djl/blob/master/jupyter/README.md).

// +
// Add the snapshot repository to get the DJL snapshot artifacts
// // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/

// Add the maven dependencies
// %maven ai.djl:api:0.13.0
// %maven ai.djl:model-zoo:0.13.0
// %maven ai.djl.mxnet:mxnet-engine:0.13.0
// %maven ai.djl.mxnet:mxnet-model-zoo:0.13.0
// %maven org.slf4j:slf4j-api:1.7.32
// %maven org.slf4j:slf4j-simple:1.7.32
// %maven net.java.dev.jna:jna:5.8.0

// See https://github.com/deepjavalibrary/djl/blob/master/engines/mxnet/mxnet-engine/README.md
// for more MXNet library selection options
// %maven ai.djl.mxnet:mxnet-native-auto:1.8.0
// -

import java.awt.image.*;
import java.nio.file.*;
import java.util.*;
import java.util.stream.*;
import ai.djl.*;
import ai.djl.basicmodelzoo.basic.*;
import ai.djl.ndarray.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.util.NDImageUtils;
import ai.djl.translate.*;

// ## Step 1: Load your handwritten digit image
//
// We will start by loading the image that we want to run our model to classify.

var img = ImageFactory.getInstance().fromUrl("https://resources.djl.ai/images/0.png");
img.getWrappedImage();

// ## Step 2: Load your model
//
// Next, we need to load the model to run inference with. This model should have been saved to the `build/mlp` directory when running the [previous tutorial](02_train_your_first_model.ipynb).

Path modelDir = Paths.get("build/mlp");
Model model = Model.newInstance("mlp");
// The block (network architecture) must match what was used for training.
model.setBlock(new Mlp(28 * 28, 10, new int[] {128, 64}));
model.load(modelDir);

// In addition to loading a local model, you can also find pretrained models within our [model zoo](http://docs.djl.ai/docs/model-zoo.html). See more options in our [model loading documentation](http://docs.djl.ai/docs/load_model.html).
//
// ## Step 3: Create a `Translator`
//
// The [`Translator`](https://javadoc.io/static/ai.djl/api/0.13.0/index.html?ai/djl/translate/Translator.html) is used to encapsulate the pre-processing and post-processing functionality of your application. The input to the processInput and processOutput should be single data items, not batches.

Translator<Image, Classifications> translator = new Translator<Image, Classifications>() {

    @Override
    public NDList processInput(TranslatorContext ctx, Image input) {
        // Convert Image to NDArray
        NDArray array = input.toNDArray(ctx.getNDManager(), Image.Flag.GRAYSCALE);
        return new NDList(NDImageUtils.toTensor(array));
    }

    @Override
    public Classifications processOutput(TranslatorContext ctx, NDList list) {
        // Create a Classifications with the output probabilities
        NDArray probabilities = list.singletonOrThrow().softmax(0);
        // Class names are simply the digit labels "0".."9".
        List<String> classNames = IntStream.range(0, 10).mapToObj(String::valueOf).collect(Collectors.toList());
        return new Classifications(classNames, probabilities);
    }

    @Override
    public Batchifier getBatchifier() {
        // The Batchifier describes how to combine a batch together
        // Stacking, the most common batchifier, takes N [X1, X2, ...] arrays to a single [N, X1, X2, ...] array
        return Batchifier.STACK;
    }
};

// ## Step 4: Create Predictor
//
// Using the translator, we will create a new [`Predictor`](https://javadoc.io/static/ai.djl/api/0.13.0/index.html?ai/djl/inference/Predictor.html). The predictor is the main class to orchestrate the inference process. During inference, a trained model is used to predict values, often for production use cases. The predictor is NOT thread-safe, so if you want to do prediction in parallel, you should call newPredictor multiple times to create a predictor object for each thread.

var predictor = model.newPredictor(translator);

// ## Step 5: Run inference
//
// With our predictor, we can simply call the [predict](https://javadoc.io/static/ai.djl/api/0.13.0/ai/djl/inference/Predictor.html#predict-I-) method to run inference. For better performance, you can also call [batchPredict](https://javadoc.io/static/ai.djl/api/0.13.0/ai/djl/inference/Predictor.html#batchPredict-java.util.List-) with a list of input items. Afterwards, the same predictor should be used for further inference calls.

// +
var classifications = predictor.predict(img);

classifications
// -

// ## Summary
//
// Now, you've successfully built a model, trained it, and run inference. Congratulations on finishing the [beginner tutorial series](https://github.com/deepjavalibrary/djl/tree/master/jupyter/tutorial). After this, you should read our other [examples](https://github.com/deepjavalibrary/djl/tree/master/examples) and [jupyter notebooks](https://github.com/deepjavalibrary/djl/tree/master/jupyter) to learn more about DJL.
//
// You can find the complete source code for this tutorial in the [examples project](https://github.com/deepjavalibrary/djl/blob/master/examples/src/main/java/ai/djl/examples/inference/ImageClassification.java).
jupyter/tutorial/03_image_classification_with_your_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Euler Problem 76
# ================
#
# It is possible to write five as a sum in exactly six different ways:
#
#     4 + 1
#     3 + 2
#     3 + 1 + 1
#     2 + 2 + 1
#     2 + 1 + 1 + 1
#     1 + 1 + 1 + 1 + 1
#
# How many different ways can one hundred be written as a sum of at least two positive integers?

# +
def pentagonal(N):
    """Yield (sign, g) for every generalized pentagonal number g <= N.

    g(k) = k*(3k-1)/2 for k = 1, -1, 2, -2, ... gives 1, 2, 5, 7, 12, 15, ...
    with signs +, +, -, -, +, +, ... -- exactly the terms required by the
    pentagonal-number-theorem recurrence for the partition function:
        p(n) = sum over g of sign * p(n - g).
    """
    a, b = 1, 2   # current pair g(k), g(-k)
    delta = 4     # gap to the next g(k); grows by 3 each step
    sgn = 1
    while a <= N:
        yield sgn, a
        a += delta
        if b <= N:
            yield sgn, b
            b += delta + 1
        delta += 3
        sgn = -sgn


def count_sum_ways(N):
    """Return the number of ways to write N as a sum of AT LEAST TWO
    positive integers.

    Computes the partition function p(N) via the pentagonal recurrence,
    then subtracts 1: p(N) counts every partition of N, including the
    trivial single-term partition {N}, which the problem excludes.
    """
    partitions = [0] * (N + 1)
    partitions[0] = 1
    for n in range(1, N + 1):
        for sgn, g in pentagonal(n):
            partitions[n] += sgn * partitions[n - g]
    # Drop the one-term sum "N" itself -- it is not a sum of two integers.
    return partitions[N] - 1


N = 100
print(count_sum_ways(N))
# -

# **Explanation:** Uses the pentagonal number recurrence for the partition counting function.
# The printed answer is p(100) - 1 rather than p(100), because the partition
# consisting of the single term 100 is not a sum of at least two integers.
#
# http://en.wikipedia.org/wiki/Pentagonal_number_theorem#Partition_recurrence
Euler 076 - Counting summations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Advanced Lane Finding Project # # The goals / steps of this project are the following: # # * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images. # * Apply a distortion correction to raw images. # * Use color transforms, gradients, etc., to create a thresholded binary image. # * Apply a perspective transform to rectify binary image ("birds-eye view"). # * Detect lane pixels and fit to find the lane boundary. # * Determine the curvature of the lane and vehicle position with respect to center. # * Warp the detected lane boundaries back onto the original image. # * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position. # # --- # ## First, I'll compute the camera calibration using chessboard images # + import numpy as np import cv2 import glob import matplotlib.pyplot as plt # %matplotlib inline # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*9,3), np.float32) objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. 
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for fname in images:
    ori_img = cv2.imread(fname)
    img = ori_img
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)

    # If found, add object points, image points
    if ret == True:
        #print(corners.shape)
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        cv2.drawChessboardCorners(img, (9,6), corners, ret)
        #cv2.imshow(fname,img)
        #cv2.waitKey(500)

cv2.destroyAllWindows()
# -

# ## Helper functions for binary threshholding and perspective transformation

# +
#compute gradient dir thresh hold
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Binary mask of pixels whose gradient direction lies within `thresh`
    # (radians). `image` should be a single-channel (grayscale) image --
    # both call sites in this notebook pass a gray conversion.
    # 2) Take the gradient in x and y separately
    # Fixed: operate on the `image` parameter instead of the module-level
    # `gray` left over from the calibration loop above.
    sobel_x = cv2.Sobel(image, cv2.CV_64F,1,0,ksize = sobel_kernel)
    sobel_y = cv2.Sobel(image, cv2.CV_64F,0,1,ksize = sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    sobel_x_abs = np.absolute(sobel_x)
    sobel_y_abs = np.absolute(sobel_y)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    arctan = np.arctan2(sobel_y_abs, sobel_x_abs)
    # 5) Create a binary mask where direction thresholds are met
    dir_binary = np.zeros_like(arctan)
    # 6) Return this mask as your binary_output image
    dir_binary[(arctan >=thresh[0]) & (arctan <= thresh[1]) ] =1
    return dir_binary

#compute the magnitute threshold
def mag_thresh(img, sobel_kernel=9, mag_thresh=(0, 255)):
    # Binary mask of pixels whose gradient magnitude lies within `mag_thresh`.
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    # Fixed: the kernel size must be passed as the `ksize` keyword -- the
    # fifth positional argument of cv2.Sobel is `dst`, not the kernel size.
    sobel_x = cv2.Sobel(img, cv2.CV_64F,1,0,ksize = sobel_kernel)
    sobel_y = cv2.Sobel(img, cv2.CV_64F,0,1,ksize = sobel_kernel)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel =np.sqrt(sobel_x**2 + sobel_y**2)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient
# magnitude
    mag_binary = np.zeros_like(scaled_sobel)
    mag_binary[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1
    return mag_binary

#gradient threshhold on the x dir
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    # Binary mask of pixels whose scaled |d/dx| lies within `thresh`.
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0,ksize =sobel_kernel ) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    #applying the mask
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return sxbinary

#color threshold on staturation
def color_threshold(img,s_thresh_min=170,s_thresh_max = 255 ):
    # Binary mask of channel values within [s_thresh_min, s_thresh_max].
    s_binary = np.zeros_like(img)
    s_binary[(img >= s_thresh_min) & (img <= s_thresh_max)] = 1
    return s_binary

#given image do perspective transform
def perspect_transform(img,src,dst):
    # Warp `img` so the quadrilateral `src` maps onto `dst`; returns the
    # inverse matrix (to warp detections back later) and the warped image.
    img_size = (img.shape[1], img.shape[0])
    M =cv2.getPerspectiveTransform(src,dst)
    Minv =cv2.getPerspectiveTransform(dst,src)
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    return Minv,warped

#take the image as well as the veteices of the area of the interest plot the area on the graph
def show_area_interest(undist,src,dst):
    plt.imshow(undist)
    plt.plot(src[:,0],src[:,1] , 'b--', lw=2)
    plt.plot(dst[:,0],dst[:,1] , 'r--', lw=2)
    return

#take in the undist image apply threshold and return bird eye transfromed image and the inverse transform matrix
def bird_eye_transform(undist):
    # Combine gradient and color thresholds, then warp to a bird's-eye view.
    hls = cv2.cvtColor(undist, cv2.COLOR_RGB2HLS)
    s_channel = hls[:,:,2]
    l_channel = hls[:,:,1]
    gray = cv2.cvtColor(undist,cv2.COLOR_BGR2GRAY)
    #apply a color transformed binary image
    # Sobel x
    sxbinary = abs_sobel_thresh(gray,sobel_kernel=3,thresh=(20, 100))
    #directional gradient threshold
    dir_binary = dir_threshold(gray, sobel_kernel=3, thresh=(0.7, 1.3))
    #magnitute threshold
    mag_binary = mag_thresh(gray, sobel_kernel=3, mag_thresh=(30, 150))
    # Threshold color channel
    s_binary = color_threshold(s_channel,s_thresh_min=170,s_thresh_max = 255 )
    # Threshold light channel
    l_binary = color_threshold(l_channel,s_thresh_min=100,s_thresh_max = 255 )
    #combine color mask and x gradient mask
    combined_binary = np.zeros_like(sxbinary)
    grad_binary = ((mag_binary ==1) &(sxbinary == 1) & (dir_binary ==1))
    combined_binary[((s_binary == 1) & (l_binary == 1))| (sxbinary == 1)] = 1
    color_binary = np.dstack(( np.zeros_like(sxbinary), grad_binary, s_binary)) * 255
    #define transform source and dst points and perform transform
    src = np.float32([[580,458],[280,680],[1095,675],[732,457]])
    dst = np.float32([[200,0], [200,680], [1000,680],[1000,0]])
    Minv, bird_eye = perspect_transform(combined_binary,src, dst)
    return Minv, bird_eye

# +
#now we have the mtx, dist of the camera so we can undistort the camera
# #%pylab inline
img = cv2.imread("../test_images/shadow2.png")
#undistrot the image
#check the distortion of the camera
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)

# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(undist, cv2.COLOR_RGB2HLS)
h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
gray = cv2.cvtColor(undist,cv2.COLOR_BGR2GRAY)

#apply a color transformed binary image
# Sobel x
sxbinary = abs_sobel_thresh(gray,sobel_kernel=3,thresh=(20, 100))
#directional gradient threshold
dir_binary = dir_threshold(gray, sobel_kernel=3, thresh=(0.7, 1.3))
#magnitute threshold
mag_binary = mag_thresh(gray, sobel_kernel=3, mag_thresh=(30, 150))
# Threshold saturation channel
s_binary = color_threshold(s_channel,s_thresh_min=220,s_thresh_max = 255 )
# Threshold light channel
l_binary = color_threshold(l_channel,s_thresh_min=100,s_thresh_max = 255 )
#combine color mask and x gradient mask
combined_binary = np.zeros_like(sxbinary)
grad_binary = ((mag_binary ==1) &(sxbinary == 1) &
(dir_binary ==1)) combined_binary[(s_binary == 1) & (l_binary ==1)| (sxbinary == 1)] = 1 color_binary = np.dstack(( grad_binary, np.zeros_like(sxbinary), s_binary)) * 255 #bird eye perspective transform src = np.float32([[580,458],[280,680],[1095,675],[732,457]]) dst = np.float32([[200,0], [200,680], [1000,680],[1000,0]]) show_area_interest(undist,src,dst) Minv, bird_eye = perspect_transform(combined_binary,src, dst) # Plotting thresholded images f, (ax1, ax2, ax3) = plt.subplots(1, 3 ,figsize=(20,10)) ax1.set_title('Stacked thresholds') ax1.imshow(l_binary) ax2.set_title('Combined S channel and gradient thresholds') ax2.imshow(combined_binary, cmap='gray') ax3.set_title('Bird eye view') ax3.imshow(bird_eye, cmap='gray') # - # # Helper Function for Finding Lane and Ploynomial fit # + #find the pixels that in both right lane and left lane stored in the array as well as output with rectangular def find_lane_pixels(binary_warped): # Take a histogram of the bottom half of the image histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0) # Create an output image to draw on and visualize the result out_img = np.dstack((binary_warped, binary_warped, binary_warped)) #plt.imshow(out_img) # Find the peak of the left and right halves of the histogram # These will be the starting point for the left and right lines midpoint = np.int(histogram.shape[0]//2) leftx_base = np.argmax(histogram[:midpoint]) rightx_base = np.argmax(histogram[midpoint:]) + midpoint # HYPERPARAMETERS # Choose the number of sliding windows nwindows = 9 # Set the width of the windows +/- margin margin = 100 # Set minimum number of pixels found to recenter window minpix = 50 # Set height of windows - based on nwindows above and image shape window_height = np.int(binary_warped.shape[0]//nwindows) # Identify the x and y positions of all nonzero pixels in the image nonzero = binary_warped.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Current positions to be updated 
later for each window in nwindows leftx_current = leftx_base rightx_current = rightx_base # Create empty lists to receive left and right lane pixel indices left_lane_inds = [] right_lane_inds = [] # Step through the windows one by one for window in range(nwindows): # Identify window boundaries in x and y (and right and left) win_y_low = binary_warped.shape[0] - (window+1)*window_height win_y_high = binary_warped.shape[0] - window*window_height ### TO-DO: Find the four below boundaries of the window ### win_xleft_low = leftx_current - margin # Update this win_xleft_high = leftx_current + margin # Update this win_xright_low = rightx_current - margin # Update this win_xright_high = rightx_current + margin # Update this # Draw the windows on the visualization image cv2.rectangle(out_img,(win_xleft_low,win_y_low), (win_xleft_high,win_y_high),(0,255,0), 2) cv2.rectangle(out_img,(win_xright_low,win_y_low), (win_xright_high,win_y_high),(0,255,0), 2) ### TO-DO: Identify the nonzero pixels in x and y within the window ### good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] #print(nonzeroy.shape,nonzerox.shape) # Append these indices to the lists left_lane_inds.append(good_left_inds) right_lane_inds.append(good_right_inds) ### TO-DO: If you found > minpix pixels, recenter next window ### ### (`right` or `leftx_current`) on their mean position ### if len(good_left_inds) > minpix: leftx_current = int(np.mean(nonzerox[good_left_inds])) if len(good_right_inds) > minpix: rightx_current = int(np.mean(nonzerox[good_right_inds])) # Concatenate the arrays of indices (previously was a list of lists of pixels) try: left_lane_inds = np.concatenate(left_lane_inds) right_lane_inds = np.concatenate(right_lane_inds) except ValueError: # Avoids an error if the 
above is not implemented fully pass # Extract left and right line pixel positions leftx = nonzerox[left_lane_inds] lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] return leftx, lefty, rightx, righty, out_img #plot the polynomial fit for both the right and left lane and plot the left pixel and right pixels in different color def fit_polynomial(binary_warped, poly = False): # Find our lane pixels first if poly: leftx, lefty, rightx, righty, out_img = poly_search_lane(binary_warped) else: leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped) ### TO-DO: Fit a second order polynomial to each using `np.polyfit` ### left_fit = np.polyfit(lefty,leftx,2) right_fit = np.polyfit(righty,rightx,2) #print(left_fit) #print(right_fit) # Generate x and y values for plotting ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) try: left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] except TypeError: # Avoids an error if `left` and `right_fit` are still none or incorrect print('The function failed to fit a line!') left_fitx = 1*ploty**2 + 1*ploty right_fitx = 1*ploty**2 + 1*ploty ## Visualization ## # Colors in the left and right lane regions out_img[lefty, leftx] = [255, 0, 0] out_img[righty, rightx] = [0, 0, 255] # Plots the left and right polynomials on the lane lines plt.plot(left_fitx, ploty, color='yellow') plt.plot(right_fitx, ploty, color='yellow') return left_fit,right_fit,out_img,ploty,left_fitx, right_fitx #measure the left and right lane curvature given the fit poly nomial in metres def measure_curvature_pixels(ploty, left_fitx, right_fitx): # Define conversions in x and y from pixels space to meters ym_per_pix = 30/720 # meters per pixel in y dimension xm_per_pix = 3.7/700 # meters per pixel in x dimension left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2) right_fit_cr = 
np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2) # Define y-value where we want radius of curvature # We'll choose the maximum y-value, corresponding to the bottom of the image y_eval = np.max(ploty) ##### TO-DO: Implement the calculation of R_curve (radius of curvature) ##### left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0]) right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0]) #find the centre of the car #left_lane and right lane bottom in pixels left_lane_bottom = left_fitx[-1] right_lane_bottom = right_fitx[-1] # Lane center as mid of left and right lane bottom lane_center = (left_lane_bottom + right_lane_bottom)/2. center_image = 640 center = (lane_center - center_image)*xm_per_pix #Convert to meters position = "left" if center < 0 else "right" center = "Vehicle is {:.2f}m {}".format(center, position) return left_curverad, right_curverad,center #search within the margin aroundthe previous poly fit line for the new lane pixels def poly_search(img,left_fitx, right_fitx,polty): margin = 100 # Generate a polygon to illustrate the search window area # And recast the x and y points into usable format for cv2.fillPoly() left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))]) left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))]) left_line_pts = np.hstack((left_line_window1, left_line_window2)) right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))]) right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))]) right_line_pts = np.hstack((right_line_window1, right_line_window2)) # Draw the lane onto the warped blank image out_img = np.zeros_like(img) cv2.fillPoly(out_img, np.int_([left_line_pts]), (0,255, 0)) cv2.fillPoly(out_img, np.int_([right_line_pts]), (0,255, 0)) search_area = cv2.addWeighted(img, 
1, out_img, 0.3, 0) return search_area #find lane pixels from previos poly search area def poly_search_lane(img): [left_fit, right_fit,polty] = line.prev_poly_fit nonzero = img.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) margin = 100 out_img = np.dstack((img, img, img)) #define the boundayl of the poly search area based onthe prevous poly fit left_win1 = left_fit[0]*nonzeroy**2 + left_fit[1]*nonzeroy + left_fit[2]-100 left_win2 = left_fit[0]*nonzeroy**2 + left_fit[1]*nonzeroy + left_fit[2]+100 right_win1 = right_fit[0]*nonzeroy**2 + right_fit[1]*nonzeroy + right_fit[2] -100 right_win2 = right_fit[0]*nonzeroy**2 + right_fit[1]*nonzeroy + right_fit[2] +100 #find the indexes of the nonezeros that within the right and left poly search area left_ind = [(nonzerox > left_win1) & (nonzerox <left_win2)] right_ind = [(nonzerox > right_win1) & (nonzerox <right_win2)] leftx = nonzerox[left_ind] lefty = nonzeroy[left_ind] rightx = nonzerox[right_ind] righty = nonzeroy[right_ind] #print(leftx.shape,lefty.shape,rightx.shape,righty.shape) return leftx, lefty, rightx, righty, out_img #given the undistorted image and the poly fit give back the curve fit on the undistrot image def warp_back(undist,Minv,left_fitx,right_fitx,ploty): # Create an image to draw the lines on color_warp = np.zeros_like(undist).astype(np.uint8) # Recast the x and y points into usable format for cv2.fillPoly() pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))]) pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) pts = np.hstack((pts_left, pts_right)) #print(pts.shape) # Draw the lane onto the warped blank image cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0)) # Warp the blank back to original image space using inverse perspective matrix (Minv) newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0])) # Combine the result with the original image lane_fit = cv2.addWeighted(undist, 1, newwarp, 0.3, 0) return 
lane_fit def put_text(img,left_curverad,right_curverad,center): font = cv2.FONT_HERSHEY_SIMPLEX location = (10,100) fontScale = 1 fontColor = (255,255,255) lineType = 2 #left curve line1 = "Left Curverad: " + str(left_curverad) + " metres" cv2.putText(img,line1, location, font, fontScale, fontColor, lineType) #right curve line2 = "Right Curverad: " + str(right_curverad) + " metres" location = (10,140) cv2.putText(img,line2, location, font, fontScale, fontColor, lineType) #centre line3 = center location = (10,180) cv2.putText(img,line3, location, font, fontScale, fontColor, lineType) return def sanity_check(): #centre of the car move #curvature of both side difference in percentage return # + #now we have the bird eye view of the lane bird_eye find the lane boundary left_fit,right_fit,out_img,ploty,left_fitx, right_fitx = fit_polynomial(bird_eye) #get the left and right lane curvature left_curverad, right_curverad,center= measure_curvature_pixels(ploty, left_fitx, right_fitx) print("left curverad:",int(left_curverad),"right curverad:", int(right_curverad)) #get the distance to the center of the curve #use the previous poly fit and plot the next search lane region with a margin to the ploy fit line search_area =poly_search(out_img,left_fitx, right_fitx,ploty) #warp the image back to the original image lane_fit = warp_back(undist,Minv,left_fitx,right_fitx,ploty) #cv2.imshow('fit',lane_fit) #cv2.waitKey(5000) put_text(lane_fit,int(left_curverad),int(right_curverad),center) plt.imshow(out_img) # - # # The Line Class to keep some history parametres # # Define a class to receive the characteristics of each line detection class Line(): def __init__(self): # was the line detected in the last iteration? 
self.detected = False # x values of the last n fits of the line self.recent_xfitted = [] #average x values of the fitted line over the last n iterations self.bestx = None #polynomial coefficients averaged over the last n iterations self.best_fit = None #the previous poly fit self.prev_poly_fit = None #[left_fit, right_fit,ploty] #polynomial coefficients for the most recent fit self.current_fit = [np.array([False])] #radius of curvature of the line in some units self.radius_of_curvature = None #distance in meters of vehicle center from the line self.line_base_pos = None #difference in fit coefficients between last and new fits self.diffs = np.array([0,0,0], dtype='float') #x values for detected line pixels self.allx = None #y values for detected line pixels self.ally = None # # Put pipe line togehter # # + line = Line() def pipeline(img): #run the first cell to get the image points and object points for camear undistortion undist = cv2.undistort(img, mtx, dist, None, mtx) #undistort the image #apply the threshold and bird eye transform Minv, bird_eye = bird_eye_transform(undist) #now we have the bird eye view of the lane bird_eye find the lane boundary if line.prev_poly_fit == None: left_fit,right_fit,out_img,ploty,left_fitx, right_fitx = fit_polynomial(bird_eye) else: left_fit,right_fit,out_img,ploty,left_fitx, right_fitx = fit_polynomial(bird_eye,poly =True) line.prev_poly_fit = [left_fit, right_fit,ploty] #get the left and right lane curvature left_curverad, right_curverad,center = measure_curvature_pixels(ploty, left_fitx, right_fitx) #print("left curverad:",left_curverad,"right curverad:", right_curverad) #get the distance to the center of the curve #use the previous poly fit and plot the next search lane region with a margin to the ploy fit line #search_area =poly_search(out_img,left_fitx, right_fitx,ploty) #warp the image back to the original image lane_fit = warp_back(undist,Minv,left_fitx,right_fitx,ploty) 
put_text(lane_fit,int(left_curverad),int(right_curverad),center) return lane_fit #plt.imshow(lane_fit) # - # ## Test in the Video # + # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(img): return pipeline(img) white_output = '../output_images/project_video.mp4' clip1 = VideoFileClip("../project_video.mp4") white_clip = clip1.fl_image(process_image) # %time white_clip.write_videofile(white_output, audio=False) # - HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output))
examples/example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="CWSFYwBkp6x7" # Tutorials table of content: # # - [Tutorial 1: Run a first scenario](./Tutorial-1_Run_your_first_scenario.ipynb) # - Tutorial 2: Add contributivity measurements methods # - [Tutorial 3: Use a custom dataset](./Tutorial-3_Use_homemade_dataset.ipynb) # # # # Tutorial 2 : Exploring contributivity # # With this example, we dive deeper into the potential of the library, and the notion of contributivity. # + [markdown] colab_type="text" id="VotO3fz-VY8c" # ## 1 - Prerequisites # # In order to run this example, you'll need to: # # * use python 3.7 + # * install this package https://pypi.org/project/mplc/ # # If you did not follow our first tutorial, it is recommended to [take a look at it !](https://github.com/SubstraFoundation/distributed-learning-contributivity/blob/master/notebooks/examples/1%20_INTRO_MNIST.ipynb) # # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="3ErUs18-VY8d" outputId="d44c15ab-4484-477b-e3be-4e052931a0ce" pycharm={"name": "#%%\n"} # !pip install mplc # + [markdown] colab_type="text" id="R1t8vwsxVY8g" # ## 2 - Context # # In collaborative data science projects partners sometimes need to train a model on multiple datasets, contributed by different data providing partners. In such cases the partners might have to measure how much each dataset involved contributed to the performance of the model. This is useful for example as a basis to agree on how to share the reward of the ML challenge or the future revenues derived from the predictive model, or to detect possible corrupted datasets or partners not playing by the rules. 
The library explores this question and the opportunity to implement some mechanisms helping partners in such scenarios to measure each dataset's *contributivity* (as *contribution to the performance of the model*). # # In the [first tutorial](./Tutorial-1_Run_your_first_scenario.ipynb), you learned how to parametrize and run a scenario. # In this tutorial, you will learn how to add one of the contributivity measurement implemented to your scenario run. # + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="S2CY2M0Hp6x8" outputId="aee525c3-e70b-4e46-84bf-207d8006258e" # imports import pandas as pd import seaborn as sns sns.set() # + [markdown] colab_type="text" id="esSTt9Qrp6yC" # ## 2 - Setup and run the scenario # # We will use the same dataset, and overall setup for the scenario. The main change relies on the contributivity parameter, which is a list of the contributivity methods that will be tested. As these methods are time-consuming, the parameter is set empty. # # All methods available are: # # ```python # - "Shapley values" # - "Independent scores" # - "TMCS" # - "ITMCS" # - "IS_lin_S" # - "IS_reg_S" # - "AIS_Kriging_S" # - "SMCS" # - "WR_SMC" # ``` # # See in the documentation the [dedicated section](https://github.com/SubstraFoundation/distributed-learning-contributivity/blob/master/subtest/docs/documentation.md#contributivity-measurement-approaches-studied-and-implemented) for explanation of the different methods. # # Here we will use the Shapley value, a contributivity measurement which came from cooperative game theory. 
# + colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="code" id="M4ZdwGV3p6yD" outputId="e78c8050-5cac-47b5-f51f-e8987378fce3" from mplc.scenario import Scenario my_scenario = Scenario(partners_count=3, amounts_per_partner=[0.001, 0.699, 0.3], epoch_count=10, minibatch_count=3, dataset='mnist', methods=["Shapley values"]) # <- Here is the difference # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="tr2Bf8vSp6yP" outputId="c2ea5301-846c-4748-fe58-94bf65305504" my_scenario.run() # + [markdown] colab_type="text" id="Xx9DUyR6p6yR" # ## 3 - Accuracy score between each partner and comparison with aggregated model performance # # Like in the first tutorial, we take a look at the scores, local and global. # + colab={"base_uri": "https://localhost:8080/", "height": 343} colab_type="code" id="XZwNSGhyVY8o" outputId="fa7b068b-772f-4a72-afca-6c57cd04324a" history_df = my_scenario.mpl.history.partners_to_dataframe() accuracy_df = history_df[history_df.Minibatch==2].pivot(index='Epoch', columns='Model', values='val_acc') score_mpl_model = my_scenario.mpl.history.history['mpl_model']['val_accuracy'][:,-1] accuracy_df['collective model'] = score_mpl_model accuracy_df # + [markdown] colab_type="text" id="KsDY6LcYp6yU" # We can plot the evolution of the accuracy through the epochs. # + colab={"base_uri": "https://localhost:8080/", "height": 415} colab_type="code" id="bz5Iw4YOVY8r" outputId="81021e0c-0e47-4a49-ab53-b5144df9adb3" pycharm={"name": "#%%\n"} ax = sns.relplot(data = accuracy_df, kind="line") ax.set(xlabel='epochs', ylabel='accuracy', title='Accuracy evolution through the epochs') # + [markdown] colab_type="text" id="D9E5AIUQVY8t" pycharm={"name": "#%% md\n"} # ## 4 - Contributivity scores # # We have set our scenario with Shapley values as a contributivity measurement method. # # While being quite heavy on computing resources, it provides a great measuring tool. 
# # # + colab={} colab_type="code" id="aqoho7oFp6yU" contributivity_score = my_scenario.contributivity_list # + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="7IpNbZDqp6yW" outputId="d5db4313-e16c-49b6-e803-3d6f4b8138cc" print(contributivity_score[0]) # + [markdown] colab_type="text" id="goGzd4ePp6yY" # Since we have artificially set our first partner to only have .1% of the total data set, it obviously contributes less in the final model. The poor accuracy of the local model of the first partner spotlights greatly this effect. However, as we use the MNIST dataset, even with .1% of the total data, the model is still able to perform reasonably well according to our Accuracy values # # There is other way to artificially generate poor contributors. For instance, we can shuffle the labels of one partner, which will mislabel the whole dataset of this particular partner. # To do so, we use the `corruption_parameters`of the scenario object. It must be a list, with size equal to the number of partner. For each partner , a string identifier indicates the method of corruption to apply on its dataset. # # ``` python # corrupted_partner=['random-uniform', 'not-corrupted', 'not-corrupted'] # # ``` # Here, with 3 partners, the first one will see its labels shuffled randomly, and the two others will have their dataset untouched. # # More parameters (like the proportion of the data to be corrupted) can be used, but in that case the string identifiers will not be enough. # You will have to instantiate the corresponding corruption object before, and pass it to the scenario, instead of the string identifier. 
# ``` python # from mplc.corruption import Permutation, Random$ # # partner0_corruption = Permutation(proportion=0.4) # partner1_corruption = Random(proportion=0.9) # corrupted_partner=[partner9_corruption, partner1_corruption, 'not-corrupted'] # # ``` # Here the first one will see its labels permuted, by a permutation matrix randomly generated (and accessible via partner.corruption.matrix). The second one will see its labels randomly changed, according to a Dirichlet distribution, which you can access to via partner.corruption.matrix attribute. Finally, the last partner will be its data untouched. # Please denote that the partner.corruption.matrix will be equal to identity matrix in that case. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="b0XHWBPxp6yY" outputId="77a0bfe5-8298-4449-c066-4b9475d14a59" my_second_scenario = Scenario(partners_count=3, amounts_per_partner=[0.2, 0.5, 0.3], # <- The repartition is more regular corruption_parameters = ['random-uniform', 'not-corrupted', 'not-corrupted'], # <- Here is the true difference epoch_count=10, minibatch_count=3, dataset='mnist', methods=["Shapley values"]) my_second_scenario.run() # + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="T0qGMB53VY84" outputId="d6f51659-f31d-4806-a846-837cb83e6e4c" contributivity_score = my_second_scenario.contributivity_list print(contributivity_score[0]) # + [markdown] colab_type="text" id="bNBE7h41VY87" # In this case, one can denotes that the contributivity measure of the first partner is really low, and lower that in the previous case. We successfully detect a partner with corrupted labels, without any look at these data ! # # In the first case, if we thought about the repartition of a reward, or the money earned by the collaborative model, this Shapley value gives us a great idea of a fair repartition. It is important to denote that the Shapley value is not strongly related to the size of the dataset. 
# In the second case, you were able to spotlight a corrupted dataset.It could be a partner who missed something in the preprocessing step ( human error, not a problem, but need to be detected !), or worth. Some partners could not play by the rules, and did not shared their precious data... # # # That's it! # # Now you can explore our other tutorials for a better overview of what can be done with `mplc`! # # This work is collaborative, enthusiasts are welcome to comment open issues and PRs or open new ones. # # Should you be interested in this open effort and would like to share any question, suggestion or input, you can use the following channels: # # - This Github repository (issues or PRs) # - Substra Foundation's [Slack workspace](https://substra-workspace.slack.com/join/shared_invite/zt-cpyedcab-FHYgpy08efKJ2FCadE2yCA), channel `#workgroup-mpl-contributivity` # - Email: <EMAIL> # # ![logo Substra Foundation](../../img/substra_logo_couleur_rvb_w150px.png)
notebooks/tutorials/Tutorial-2_Add_contributivity_measurement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Loading Features From Dictionaries from sklearn.feature_extraction import DictVectorizer staff = [{'name': '<NAME>', 'age': 33.}, {'name': '<NAME>', 'age': 12.}, {'name': '<NAME>', 'age': 18.}] # Create an object for our dictionary vectorizer vec = DictVectorizer() # Fit then transform the staff dictionary with vec, then output an array vec.fit_transform(staff).toarray() # - # Get Feature Names vec.get_feature_names() # + #Make Simulated Data For Clustering from sklearn.datasets import make_blobs import matplotlib.pyplot as plt # - # Make the features (X) and output (y) with 200 samples, X, y = make_blobs(n_samples = 200, # two feature variables, n_features = 2, # three clusters, centers = 3, # with .5 cluster standard deviation, cluster_std = 0.5, # shuffled, shuffle = True) # + # Create a scatterplot of the first and second features plt.scatter(X[:,0], X[:,1],color = 'lime') # Show the scatterplot plt.show() # + #Loading scikit-learn's Boston Housing Dataset # Load libraries from sklearn import datasets import matplotlib.pyplot as plt # + # Load digits dataset boston = datasets.load_boston() # Create feature matrix X = boston.data # Create target vector y = boston.target # View the first observation's feature values X[0] # - # Display each feature value of the first observation as floats ['{:f}'.format(x) for x in X[0]] # + #Make Simulated Data For Regression import pandas as pd from sklearn.datasets import make_regression # - # Generate fetures, outputs, and true coefficient of 100 samples, features, output, coef = make_regression(n_samples = 100, # three features n_features = 3, # where only two features are useful, n_informative = 2, # a single target value per observation n_targets = 1, # 0.0 standard deviation of the guassian noise 
noise = 0.0, # show the true coefficient used to generated the data coef = True) # View the features of the first five rows pd.DataFrame(features, columns=['Store 1', 'Store 2', 'Store 3']).head() # View the output of the first five rows pd.DataFrame(output, columns=['Sales']).head() # View the actual, true coefficients used to generate the data pd.DataFrame(coef, columns=['True Coefficient Values']) # + #Loading scikit-learn's Digits Dataset # Load libraries from sklearn import datasets import matplotlib.pyplot as plt # + # Load digits dataset digits = datasets.load_digits() # Create feature matrix X = digits.data # Create target vector y = digits.target # View the first observation's feature values X[0] # - # View the first observation's feature values as a matrix digits.images[0] # Visualize the first observation's feature values as an image plt.gray() plt.matshow(digits.images[0]) plt.show() # + #Perceptron In Scikit # Load required libraries from sklearn import datasets from sklearn.preprocessing import StandardScaler from sklearn.linear_model import Perceptron from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import numpy as np # + # Load the iris dataset iris = datasets.load_iris() # Create our X and y data X = iris.data y = iris.target # - # View the first five observations of our y data y[:5] # View the first five observations of our x data. 
# Notice that there are four independent variables (features) X[:5] # Split the data into 70% training data and 30% test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # Train the scaler, which standarizes all the features to have mean=0 and unit variance sc = StandardScaler() sc.fit(X_train) # + # Apply the scaler to the X training data X_train_std = sc.transform(X_train) # Apply the SAME scaler to the X test data X_test_std = sc.transform(X_test) # + # Create a perceptron object with the parameters: 40 iterations (epochs) over the data, and a learning rate of 0.1 ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0) # Train the perceptron ppn.fit(X_train_std, y_train) # - # Apply the trained perceptron on the X data to make predicts for the y test data y_pred = ppn.predict(X_test_std) # View the predicted y test data y_pred # View the true y test data y_test # View the accuracy of the model, which is: 1 - (observations predicted wrong / total observations) print('Accuracy: %.2f' % accuracy_score(y_test, y_pred)) # + #Loading scikit-learn's Iris Dataset # Load digits dataset iris = datasets.load_iris() # Create feature matrix X = iris.data # Create target vector y = iris.target # View the first observation's feature values X[0] # + #Saving Machine Learning Models from sklearn.linear_model import LogisticRegression from sklearn import datasets import pickle from sklearn.externals import joblib # + # Load the iris data iris = datasets.load_iris() # Create a matrix, X, of features and a vector, y. X, y = iris.data, iris.target # - # Train a naive logistic regression model clf = LogisticRegression(random_state=0) clf.fit(X, y) # Save the trained model as a pickle string. 
saved_model = pickle.dumps(clf) # View the pickled model saved_model # + # Load the pickled model clf_from_pickle = pickle.loads(saved_model) # Use the loaded pickled model to make predictions clf_from_pickle.predict(X) # - # Save the model as a pickle in a file joblib.dump(clf, 'filename.pkl') # Load the model from the file clf_from_joblib = joblib.load('filename.pkl') # Use the loaded model to make predictions clf_from_joblib.predict(X) # + #Make Simulated Data For Classification from sklearn.datasets import make_classification import pandas as pd # - # Create a simulated feature matrix and output vector with 100 samples, features, output = make_classification(n_samples = 100, # ten features n_features = 10, # five features that actually predict the output's classes n_informative = 5, # five features that are random and unrelated to the output's classes n_redundant = 5, # three output classes n_classes = 3, # with 20% of observations in the first class, 30% in the second class, # and 50% in the third class. ('None' makes balanced classes) weights = [.2, .3, .8]) # View the first five observations and their 10 features pd.DataFrame(features).head() # View the first five observation's classes pd.DataFrame(output).head()
Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # ExternalSource operator
#
# In this example, we will see how to use `ExternalSource` operator, that allows us to
# use an external data source as an input to the Pipeline.
#
# In order to achieve that, we have to define a Iterator or Generator class which `next` function will
# return one or several `numpy` arrays.

# +
import types
import collections
import numpy as np
from random import shuffle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types  # NOTE: shadows the stdlib `types` imported above

batch_size = 16
# -

# ### Defining the iterator

class ExternalInputIterator(object):
    """Endless iterator yielding (batch, labels) lists of raw JPEG bytes and labels.

    Reads ``images/file_list.txt`` where each non-empty line is
    "<jpeg_filename> <label>", shuffles the list once, then cycles through it
    ``batch_size`` samples at a time, wrapping around at the end.
    """

    def __init__(self, batch_size):
        self.images_dir = "images/"
        self.batch_size = batch_size
        with open(self.images_dir + "file_list.txt", 'r') as f:
            # Fixed: the original used `line is not ''`, an identity comparison
            # that is always true (and a SyntaxWarning on modern Python), so
            # blank lines were never filtered and would crash the split() in
            # __next__. Compare by value instead.
            self.files = [line.rstrip() for line in f if line.strip() != '']
        shuffle(self.files)

    def __iter__(self):
        # (Re)start an epoch: position 0, remember dataset size for wrap-around.
        self.i = 0
        self.n = len(self.files)
        return self

    def __next__(self):
        batch = []
        labels = []
        for _ in range(self.batch_size):
            jpeg_filename, label = self.files[self.i].split(' ')
            # Context manager closes the handle immediately; the original
            # leaked one open file descriptor per sample.
            with open(self.images_dir + jpeg_filename, 'rb') as f:
                batch.append(np.frombuffer(f.read(), dtype = np.uint8))
            labels.append(np.array([label], dtype = np.uint8))
            self.i = (self.i + 1) % self.n  # wrap: the iterator never exhausts
        return (batch, labels)

    next = __next__  # Python 2 compatibility alias

# ### Defining the pipeline
#
# The next step is to define the Pipeline.
#
# We override `Pipeline.iter_setup`, a method called by the pipeline before every `Pipeline.run`, to call the iterator
# and feed the result to `ExternalSource()` operator, referenced by `self.jpeg`, by using `feed_input`.
eii = ExternalInputIterator(batch_size)
iterator = iter(eii)  # module-level iterator consumed by ExternalSourcePipeline.iter_setup below

class ExternalSourcePipeline(Pipeline):
    # DALI pipeline fed from the Python-side iterator above:
    # raw JPEG bytes -> mixed CPU/GPU decode -> cast to INT32 on the GPU.
    def __init__(self, batch_size, num_threads, device_id):
        super(ExternalSourcePipeline, self).__init__(batch_size, num_threads, device_id, seed=12)
        # Two ExternalSource nodes: one for the encoded images, one for labels.
        self.input = ops.ExternalSource()
        self.input_label = ops.ExternalSource()
        # "mixed" device: decode starts on CPU and outputs into GPU memory.
        self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB)
        self.cast = ops.Cast(device = "gpu", dtype = types.INT32)

    def define_graph(self):
        # Build the dataflow graph. The ExternalSource outputs are kept on
        # `self` so iter_setup can feed them by reference each run.
        self.jpegs = self.input()
        self.labels = self.input_label()
        images = self.decode(self.jpegs)
        output = self.cast(images)
        return (output, self.labels)

    def iter_setup(self):
        # Called before every Pipeline.run: pull the next host-side batch and
        # hand it to the two ExternalSource nodes.
        # NOTE(review): relies on the module-level `iterator`; fine for a demo,
        # but a per-pipeline iterator would be safer — confirm if reused.
        (images, labels) = iterator.next()
        self.feed_input(self.jpegs, images)
        self.feed_input(self.labels, labels)

# ### Using the pipeline

pipe = ExternalSourcePipeline(batch_size=batch_size, num_threads=2, device_id = 0)
pipe.build()
pipe_out = pipe.run()

# Notice that labels are still on CPU and no asCPU call is needed to show them.

batch_cpu = pipe_out[0].asCPU()  # images were produced on the GPU; copy back to host
labels_cpu = pipe_out[1]

import matplotlib.pyplot as plt
img = batch_cpu.at(2)  # third sample of the batch
print(img.shape)
print(labels_cpu.at(2))
plt.imshow(img.astype('uint8'))
docs/examples/external_input.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SIMD Autovectorization in Numba # # **This Notebook has been adapted with minor changes from https://github.com/numba/numba-examples. The original is licensed undera BSD-2 license and copyrighted by Numba. See https://github.com/numba/numba-examples/blob/master/LICENSE for details.** # # Most modern CPUs have support for instructions that apply the same operation to multiple data elements simultaneously. These are called "Single Instruction, Multiple Data" (SIMD) operations, and the LLVM backend used by Numba can generate them in some cases to execute loops more quickly. (This process is called "autovectorization.") # # For example, Intel processors have support for SIMD instruction sets like: # # * SSE (128-bit inputs) # * AVX (256-bit inputs) # * AVX-512 (512-bit inputs, Skylake-X and later or Xeon Phi) # # These wide instructions typically operate on as many values as will fit into an input register. For AVX instructions, this means that either 8 float32 values or 4 float64 values can be processed as a single input. As a result, the NumPy dtype that you use can potentially impact performance to a greater degree than when SIMD is not in use. import numpy as np from numba import jit # It can be somewhat tricky to determine when LLVM has successfully autovectorized a loop. The Numba team is working on exporting diagnostic information to show where the autovectorizer has generated SIMD code. For now, we can use a fairly crude approach of searching the assembly language generated by LLVM for SIMD instructions. # # It is also interesting to note what kind of SIMD is used on your system. 
On x86_64, the name of the registers used indicates which level of SIMD is in use: # # * SSE: `xmmX` # * AVX/AVX2: `ymmX` # * AVX-512: `zmmX` # # where X is an integer. # # **Note**: The method we use below to find SIMD instructions will only work on Intel/AMD CPUs. Other platforms have entirely different assembly language syntax for SIMD instructions. def find_instr(func, keyword, sig=0, limit=5): count = 0 for l in func.inspect_asm(func.signatures[sig]).split('\n'): if keyword in l: count += 1 print(l) if count >= limit: break if count == 0: print('No instructions found') # ## Basic SIMD # # Let's start with a simple function that returns the square difference between two arrays, as you might write for a least-squares optimization: @jit(nopython=True) def sqdiff(x, y): out = np.empty_like(x) for i in range(x.shape[0]): out[i] = (x[i] - y[i])**2 return out x32 = np.linspace(1, 2, 10000, dtype=np.float32) y32 = np.linspace(2, 3, 10000, dtype=np.float32) sqdiff(x32, y32) x64 = x32.astype(np.float64) y64 = y32.astype(np.float64) sqdiff(x64, y64) # Numba has created two different implementations of the function, one for `float32` 1-D arrays, and one for `float64` 1-D arrays: sqdiff.signatures # This allows Numba (and LLVM) to specialize the use of the SIMD instructions for each situation. In particular, using lower precision floating point allows twice as many values to fit into a SIMD register. We will see that for the same number of elements, the `float32` calculation goes twice as fast: # %timeit sqdiff(x32, y32) # %timeit sqdiff(x64, y64) # We can check for SIMD instructions in both cases. (Due to the order of compilation above, signature 0 is the `float32` implementation and signature 1 is the `float64` implementation.) 
print('float32:') find_instr(sqdiff, keyword='subp', sig=0) print('---\nfloat64:') find_instr(sqdiff, keyword='subp', sig=1) # In x86_64 assembly, SSE uses `subps` for "subtraction packed single precision" (AVX uses `vsubps`), representing vector float32 operations. The `subpd` instruction (AVX = `vsubpd`) stands for "subtraction packed double precision", representing float64 operations. # ## SIMD and Division # # In general, the autovectorizer cannot deal with branches inside loops, although this is an area where LLVM is likely to improve in the future. Your best bet for SIMD acceleration is to only have pure math operations in the loop. # # As a result, you would naturally assume a function like this would be OK: @jit(nopython=True) def frac_diff1(x, y): out = np.empty_like(x) for i in range(x.shape[0]): out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i]) return out frac_diff1(x32, y32) find_instr(frac_diff1, keyword='subp', sig=0) # `No instructions found`?! # # The problem is that division by zero can behave in two different ways: # # * In Python, division by zero raises an exception. # * In NumPy, division by zero results in a `NaN` or `inf`, like in C. # # By default, Numba `@jit` follows the Python convention, and `@vectorize`/`@guvectorize` follow the NumPy convention. When following the Python convention, a simple division operation `r = x / y` expands out into something like: # # ``` python # # if y == 0: # raise ZeroDivisionError() # else: # r = x / y # ``` # # This branching code causes the autovectorizer to give up, and no SIMD to be generated for our example above. 
# # Fortunately, Numba allows you to override the "error model" of the function if you don't want a `ZeroDivisionError` to be raised: @jit(nopython=True, error_model='numpy') def frac_diff2(x, y): out = np.empty_like(x) for i in range(x.shape[0]): out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i]) return out frac_diff2(x32, y32) find_instr(frac_diff2, keyword='subp', sig=0) # We have SIMD instructions again, but when we check the speed: # %timeit frac_diff2(x32, y32) # %timeit frac_diff2(x64, y64) # This is faster than the no-SIMD case, but there doesn't seem to be a speed benefit with `float32` inputs. What's going on? # # The remaining issue is very subtle. We can see it if we look at a type-annotated version of the function: frac_diff2.inspect_types(pretty=True) # If you expand out line 5 in the float32 version of the function, you will see the following bit of Numba IR: # # ``` # $const30.2 = const(int, 2) :: Literal[int](2) # $36binary_subscr.5 = getitem(value=x, index=i) :: float32 # $42binary_subscr.8 = getitem(value=y, index=i) :: float32 # $44binary_subtract.9 = $36binary_subscr.5 - $42binary_subscr.8 :: float32 # del $42binary_subscr.8 # del $36binary_subscr.5 # $46binary_multiply.10 = $const30.2 * $44binary_subtract.9 :: float64``` # # Notice that the constant `2` has been typed as an int value. Later, this causes the multiplication `2 * (x[i] - y[i]` to promote up to float64, and then the rest of the calculation becomes float64. 
This is a situation where Numba is being overly conservative (and should be fixed at some point), but we can tweak this behavior by casting the constant to the type we want: @jit(nopython=True, error_model='numpy') def frac_diff3(x, y): out = np.empty_like(x) dt = x.dtype # Cast the constant using the dtype of the input for i in range(x.shape[0]): # Could also use np.float32(2) to always use same type, regardless of input out[i] = dt.type(2) * (x[i] - y[i]) / (x[i] + y[i]) return out frac_diff3(x32, y32) # %timeit frac_diff3(x32, y32) # %timeit frac_diff3(x64, y64) # Now our float32 version is nice and speedy (and 6x faster than what we started with, if we only care about float32). # ## SIMD and Reductions # # The autovectorizer can also optimize reduction loops, but only with permission. Normally, compilers are very careful not to reorder floating point instructions because floating point arithmetic is approximate, so mathematically allowed transformations do not always give the same result. For example, it is not generally true for floating point numbers that: # # ``` # (a + (b + c)) == ((a + b) + c) # ``` # # For many situations, the round-off error that causes the difference between the left and the right is not important, so changing the order of additions is acceptable for a performance increase. # # To allow reordering of operations, we need to tell Numba to enable `fastmath` optimizations: # + @jit(nopython=True) def do_sum(A): acc = 0. # without fastmath, this loop must accumulate in strict order for x in A: acc += x**2 return acc @jit(nopython=True, fastmath=True) def do_sum_fast(A): acc = 0. # with fastmath, the reduction can be vectorized as floating point # reassociation is permitted. for x in A: acc += x**2 return acc # - do_sum(x32) find_instr(do_sum, keyword='mulp') # look for vector multiplication do_sum_fast(x32) find_instr(do_sum_fast, keyword='mulp') # The fast version is 2x faster: # %timeit do_sum(x32) # %timeit do_sum_fast(x32)
hpc_lecture_notes/simd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} id="UO4namhuBH3l" # COMP 215 - LAB 1 # ---------------- # #### Name: # #### Date: # This lab exercise is mostly to introduce some of the power in Jupyter Notebooks. # Note that a Notebook is composed of "cells" - some are "text", like this one, while others are "code" # # The code cell below simply imports all the modules we'll be using... # + pycharm={"name": "#%%\n"} id="DqSJNdSdBH3n" import datetime, json, requests import matplotlib.pyplot as plt import matplotlib.dates as mdates from pprint import pprint # + [markdown] id="0rnf67FTBH3p" # Now, let's fetch some Covid-19 daily case-count data from: https://opencovid.ca/api/ # # Query: # - `stat=cases` # the type of data to fetch # - `loc=BC` # the location to fetch data for # - `after=01-12-2021` # since the 1st of december (note date format: dd-mm-yyyy) # + pycharm={"name": "#%%\n"} id="Al2T68b2BH3p" outputId="28a736f4-8a65-47d1-865a-92c0abb43ddd" query = 'https://api.opencovid.ca/timeseries?stat=cases&loc=BC&after=01-12-2021' response = requests.request("GET", query, headers={}, data={}) print(response.text) # + [markdown] pycharm={"name": "#%% md\n"} id="np4HQTmABH3q" # Notice that the response is just a string of text (most data is exchanged on the web as plain text!) # # The `json.loads` function "parses" such text and loads the data into a dictionary... 
# + pycharm={"name": "#%%\n"} id="M5wV1C_0BH3q" result = json.loads(response.text) #pprint(result) # remove this comment and re-run this cell to see the resulting dictionary # + [markdown] pycharm={"name": "#%% md\n"} id="dAXX9kuZBH3r" # Next we use "list comprehension" to extract the list of dates and associated cases into "parallel lists" # # Notice how we convert the date strings, using strptime, into a real date objects so they are easier to work with (format: dd-mm-yyyy) # + pycharm={"name": "#%%\n"} id="JOFe4vNCBH3s" outputId="7fe18aae-8f0d-4c61-f5c2-b789deda0105" date_strings = [daily['date_report'] for daily in result['cases']] dates = [datetime.datetime.strptime(date, '%d-%m-%Y').date() for date in date_strings] cases = [daily['cases'] for daily in result['cases']] print(dates) print(cases) # + [markdown] pycharm={"name": "#%% md\n"} id="koVKiju9BH3s" # ## Exercise 1 # # In the code cell below, re-write each of the 3 "list comprehensions" above as a loop so you understand how they work. # # Notice that a "list comprehension" is a compact way to write a "list accumlator" algorithm (and more efficient too!) # + pycharm={"name": "#%%\n"} id="aa5xLcWEBH3t" # Your code here # + [markdown] id="2ow28BuTBH3u" # Finally, we'll plot the (dates,cases) data as a nice x-y line graph. # # The code to format the x-axis labels is taken from https://matplotlib.org/stable/gallery/ticks/date_concise_formatter.html # # # + pycharm={"name": "#%%\n"} id="FgwPH8mTBH3v" outputId="536431b5-04b4-43d5-8548-010918a22a9e" def format_date_axis(ax): """ format the dates shown on the x-axis of given axes, ax """ locator = mdates.AutoDateLocator(minticks=10, maxticks=20) formatter = mdates.ConciseDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) fig, ax = plt.subplots() format_date_axis(ax) ax.plot(dates, cases, label='Daily Cases') # Plot some data on the axes. ax.set_xlabel('Date') # Add an x-label to the axes. 
ax.set_ylabel('confirmed cases')  # Add a y-label to the axes (fixed typo: was 'casess').
ax.set_title("Covid-19 case counts for BC")  # Add a title to the axes.
ax.legend();

# + [markdown] id="qqeTORnvBH3v"
# ## Exercise 2
#
# Repeat the analysis above, but this time only for Vancouver Coastal Health Region.
#
# * Try to modify as little code as you can (should be just a few characters), and re-run the analysis
# * You can get the location code for each health region here: https://opencovid.ca/api/#health-region-codes
# * Can you generalize the code in the 2nd code cell to make it easier to repeat the analysis for different locations?  What about for different dates?
#
# ## Exercise 3
#
# Notice that BC does not report case counts on weekends so the data plot looks erratic.
#
# * One way to fix that is to plot a "7-day rolling average" instead.
#   Each day, we take the average of the previous 7 days cases.
# * Add new code cell below, compute the 7-day rolling average for each day from the cases list.
# * Create a plot to display the rolling average data and compare your plot with the one produced above.
#
# Hints: you are free to do this however you like, but a quite elegant solution uses list comprehension, range, and slices
#
labs/lab1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem 023
#
# (Title fixed: this is Project Euler Problem 23 — see the filename 023.ipynb — not 032.)
#
# A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
#
# A number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n.
#
# As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.
#
# Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.

# # Solution
#
# For this one, we build up to a function to determine if a given integer is abundant.

def divisors(n):
    """Return the proper divisors of n (every divisor strictly below n), ascending.

    Walks candidates only up to sqrt(n) and records the complementary divisor
    n // i at the same time, so this is O(sqrt(n)) instead of the original
    naive O(n) scan — the brute-force search below drops from ~45 seconds to
    around a second.
    """
    if n <= 1:
        return []  # 1 (and anything smaller) has no proper divisors
    small = [1]    # divisors <= sqrt(n), found in ascending order
    large = []     # their complements n // i, found in descending order
    i = 2
    while i * i <= n:
        if n % i == 0:
            small.append(i)
            if i != n // i:       # don't list the root of a perfect square twice
                large.append(n // i)
        i = i + 1
    return small + large[::-1]

def is_abundant(n):
    """True when the proper divisors of n sum to more than n itself."""
    return sum(divisors(n)) > n

# Now, we just iterate through the list of abundant numbers. Note we are using the fact that 12 is the smallest abundant number, and since all integers greater than 28123 can be written as the sum of two abundant numbers, we only care about abundant integers up to that point.
#
# (With the naive O(n) divisor scan this computation took about 45 seconds on a 7th-generation Intel i7; the sqrt-bounded version above is far faster.)
# +
# Collect the abundant numbers in [12, 28123): 12 is the smallest abundant
# number, and every integer above 28123 is known to be expressible anyway.
abundant_nums = [i for i in range(12, 28123) if is_abundant(i)]
# -

# Now we just go through each integer, which we store inside a dictionary for fast access. Then finally sum.

# +
sum_of_two_abundants = {i: False for i in range(1, 28123 + 1)}

for idx, i in enumerate(abundant_nums):
    # Addition is commutative, so only pairs with j >= i need visiting;
    # this halves the work while marking exactly the same sums.
    for j in abundant_nums[idx:]:
        if i + j > 28123:
            break  # abundant_nums is ascending: every later j overshoots too
        sum_of_two_abundants[i + j] = True
# -

# Sum of all positive integers that are NOT the sum of two abundant numbers.
sum(i for (i, x) in sum_of_two_abundants.items() if not x)
python/023.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # About this Notebook # # **Bayesian Temporal Tensor Factorization** (or **BTTF** for short) is a type of Bayesian tensor decomposition that achieves state-of-the-art results on challenging the missing data imputation problem. In the following, we will discuss: # # - What the BTTF is? # # - How to implement BTTF mainly using Python `Numpy` with high efficiency? # # - How to make imputations with real-world spatiotemporal datasets? # # If you want to understand what is BTMF and its modeling tricks in detail, our paper is for you: # # > <NAME>, <NAME> (2019). **Bayesian temporal factorization for multidimensional time series prediction**. # # ## Quick Run # # This notebook is publicly available for any usage at our data imputation project. Please click [**transdim**](https://github.com/xinychen/transdim). import numpy as np from numpy.random import multivariate_normal as mvnrnd from scipy.stats import wishart from scipy.stats import invwishart from numpy.linalg import inv as inv # # Part 1: Matrix Computation Concepts # # ## 1) Kronecker product # # - **Definition**: # # Given two matrices $A\in\mathbb{R}^{m_1\times n_1}$ and $B\in\mathbb{R}^{m_2\times n_2}$, then, the **Kronecker product** between these two matrices is defined as # # $$A\otimes B=\left[ \begin{array}{cccc} a_{11}B & a_{12}B & \cdots & a_{1m_2}B \\ a_{21}B & a_{22}B & \cdots & a_{2m_2}B \\ \vdots & \vdots & \ddots & \vdots \\ a_{m_11}B & a_{m_12}B & \cdots & a_{m_1m_2}B \\ \end{array} \right]$$ # where the symbol $\otimes$ denotes Kronecker product, and the size of resulted $A\otimes B$ is $(m_1m_2)\times (n_1n_2)$ (i.e., $m_1\times m_2$ columns and $n_1\times n_2$ rows). 
# # - **Example**: # # If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]$ and $B=\left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10 \\ \end{array} \right]$, then, we have # # $$A\otimes B=\left[ \begin{array}{cc} 1\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 2\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ 3\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 4\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ \end{array} \right]$$ # # $$=\left[ \begin{array}{cccccc} 5 & 6 & 7 & 10 & 12 & 14 \\ 8 & 9 & 10 & 16 & 18 & 20 \\ 15 & 18 & 21 & 20 & 24 & 28 \\ 24 & 27 & 30 & 32 & 36 & 40 \\ \end{array} \right]\in\mathbb{R}^{4\times 6}.$$ # # ## 2) Khatri-Rao product (`kr_prod`) # # - **Definition**: # # Given two matrices $A=\left( \boldsymbol{a}_1,\boldsymbol{a}_2,...,\boldsymbol{a}_r \right)\in\mathbb{R}^{m\times r}$ and $B=\left( \boldsymbol{b}_1,\boldsymbol{b}_2,...,\boldsymbol{b}_r \right)\in\mathbb{R}^{n\times r}$ with same number of columns, then, the **Khatri-Rao product** (or **column-wise Kronecker product**) between $A$ and $B$ is given as follows, # # $$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2,...,\boldsymbol{a}_r\otimes \boldsymbol{b}_r \right)\in\mathbb{R}^{(mn)\times r},$$ # where the symbol $\odot$ denotes Khatri-Rao product, and $\otimes$ denotes Kronecker product. 
#
# - **Example**:
#
# If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]=\left( \boldsymbol{a}_1,\boldsymbol{a}_2 \right) $ and $B=\left[ \begin{array}{cc} 5 & 6 \\ 7 & 8 \\ 9 & 10 \\ \end{array} \right]=\left( \boldsymbol{b}_1,\boldsymbol{b}_2 \right) $, then, we have
#
# $$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2 \right) $$
#
# $$=\left[ \begin{array}{cc} \left[ \begin{array}{c} 1 \\ 3 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 5 \\ 7 \\ 9 \\ \end{array} \right] & \left[ \begin{array}{c} 2 \\ 4 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 6 \\ 8 \\ 10 \\ \end{array} \right] \\ \end{array} \right]$$
#
# $$=\left[ \begin{array}{cc} 5 & 12 \\ 7 & 16 \\ 9 & 20 \\ 15 & 24 \\ 21 & 32 \\ 27 & 40 \\ \end{array} \right]\in\mathbb{R}^{6\times 2}.$$

def kr_prod(a, b):
    """Khatri-Rao (column-wise Kronecker) product of a (m, r) and b (n, r).

    Returns an (m*n, r) matrix whose s-th column is the Kronecker product of
    the s-th columns of `a` and `b`.
    """
    return np.einsum('ir, jr -> ijr', a, b).reshape(a.shape[0] * b.shape[0], -1)

A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8], [9, 10]])
print(kr_prod(A, B))

# ## 3) Computing Covariance Matrix (`cov_mat`)
#
# For any matrix $X\in\mathbb{R}^{m\times n}$, `cov_mat` can return a $n\times n$ covariance matrix for special use in the following.

def cov_mat(mat):
    """Return the scatter matrix of `mat`: sum_i (row_i - mean)(row_i - mean)^T.

    For `mat` of shape (m, n) the result is (n, n). Computed as a single
    matrix product on the mean-centered rows — same values as the original
    per-row outer-product loop, but one vectorized call instead of m
    Python-level einsum calls.
    """
    deviations = mat - np.mean(mat, axis = 0)
    return np.matmul(deviations.T, deviations)

# ## 4) CP decomposition (`cp_combine`)
#
# - **Definition**:
#
# The CP decomposition factorizes a tensor into a sum of outer products of vectors.
# For example, for a third-order tensor $\mathcal{Y}\in\mathbb{R}^{m\times n\times f}$, the CP decomposition can be written as
#
# $$\hat{\mathcal{Y}}=\sum_{s=1}^{r}\boldsymbol{u}_{s}\circ\boldsymbol{v}_{s}\circ\boldsymbol{x}_{s},$$
# or element-wise,
#
# $$\hat{y}_{ijt}=\sum_{s=1}^{r}u_{is}v_{js}x_{ts},\forall (i,j,t),$$
# where vectors $\boldsymbol{u}_{s}\in\mathbb{R}^{m},\boldsymbol{v}_{s}\in\mathbb{R}^{n},\boldsymbol{x}_{s}\in\mathbb{R}^{f}$ are columns of factor matrices $U\in\mathbb{R}^{m\times r},V\in\mathbb{R}^{n\times r},X\in\mathbb{R}^{f\times r}$, respectively. The symbol $\circ$ denotes vector outer product.
#
# - **Example**:
#
# Given factor matrices $U\in\mathbb{R}^{2\times 2}$, $V\in\mathbb{R}^{3\times 2}$ and $X\in\mathbb{R}^{4\times 2}$ as below, $\hat{\mathcal{Y}}=\sum_{s=1}^{r}\boldsymbol{u}_{s}\circ\boldsymbol{v}_{s}\circ\boldsymbol{x}_{s}$ assembles a $2\times 3\times 4$ tensor whose frontal slices $\hat{Y}_t=\hat{\mathcal{Y}}(:,:,t)$ are printed by the demo code.

def cp_combine(U, V, X):
    """Assemble the third-order tensor of a CP decomposition.

    Entry (i, j, t) of the result is sum_s U[i, s] * V[j, s] * X[t, s];
    for U (m, r), V (n, r), X (f, r) the result has shape (m, n, f).
    Implemented as a broadcast product over a shared rank axis followed by a
    sum, which is equivalent to np.einsum('is, js, ts -> ijt', U, V, X).
    """
    expanded = U[:, None, None, :] * V[None, :, None, :] * X[None, None, :, :]
    return expanded.sum(axis = -1)

U = np.array([[1, 2], [3, 4]])
V = np.array([[1, 3], [2, 4], [5, 6]])
X = np.array([[1, 5], [2, 6], [3, 7], [4, 8]])
tensor_demo = cp_combine(U, V, X)
print(tensor_demo)
print()
print('tensor size:')
print(tensor_demo.shape)

# ## 5) Tensor Unfolding (`ten2mat`) and Matrix Folding (`mat2ten`)
#
# Using numpy reshape to perform 3rd rank tensor unfold
operation. [[**link**](https://stackoverflow.com/questions/49970141/using-numpy-reshape-to-perform-3rd-rank-tensor-unfold-operation)] import numpy as np def ten2mat(tensor, mode): return np.reshape(np.moveaxis(tensor, mode, 0), (tensor.shape[mode], -1), order = 'F') X = np.array([[[1, 2, 3, 4], [3, 4, 5, 6]], [[5, 6, 7, 8], [7, 8, 9, 10]], [[9, 10, 11, 12], [11, 12, 13, 14]]]) print('tensor size:') print(X.shape) print('original tensor:') print(X) print() print('(1) mode-1 tensor unfolding:') print(ten2mat(X, 0)) print() print('(2) mode-2 tensor unfolding:') print(ten2mat(X, 1)) print() print('(3) mode-3 tensor unfolding:') print(ten2mat(X, 2)) def mat2ten(mat, tensor_size, mode): index = list() index.append(mode) for i in range(tensor_size.shape[0]): if i != mode: index.append(i) return np.moveaxis(np.reshape(mat, list(tensor_size[index]), order = 'F'), 0, mode) # ## 6) Generating Matrix Normal Distributed Random Matrix def mnrnd(M, U, V): """ Generate matrix normal distributed random matrix. M is a m-by-n matrix, U is a m-by-m matrix, and V is a n-by-n matrix. 
""" dim1, dim2 = M.shape X0 = np.random.rand(dim1, dim2) P = np.linalg.cholesky(U) Q = np.linalg.cholesky(V) return M + np.matmul(np.matmul(P, X0), Q.T) # # Part 2: Bayesian Temporal Tensor Factorization (BTTF) def BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, maxiter1, maxiter2): """Bayesian Temporal Tensor Factorization, BTTF.""" U = init["U"] V = init["V"] X = init["X"] d = time_lags.shape[0] dim1, dim2, dim3 = sparse_tensor.shape dim = np.array([dim1, dim2, dim3]) pos = np.where((dense_tensor != 0) & (sparse_tensor == 0)) position = np.where(sparse_tensor != 0) binary_tensor = np.zeros((dim1, dim2, dim3)) binary_tensor[position] = 1 beta0 = 1 nu0 = rank mu0 = np.zeros((rank)) W0 = np.eye(rank) tau = 1 alpha = 1e-6 beta = 1e-6 S0 = np.eye(rank) Psi0 = np.eye(rank * d) M0 = np.zeros((rank * d, rank)) mat_hat = np.zeros((dim1, dim2, dim3 + 1)) U_plus = np.zeros((dim1, rank)) V_plus = np.zeros((dim2, rank)) X_plus = np.zeros((dim3, rank)) X_new = np.zeros((dim3 + 1, rank)) X_new_plus = np.zeros((dim3 + 1, rank)) A_plus = np.zeros((rank, rank, d)) tensor_hat_plus = np.zeros((dim1, dim2, dim3 + 1)) for iters in range(maxiter1): for order in range(2): if order == 0: mat = U.copy() elif order == 1: mat = V.copy() mat_bar = np.mean(mat, axis = 0) var_mu_hyper = (dim[order] * mat_bar + beta0 * mu0)/(dim[order] + beta0) var_W_hyper = inv(inv(W0) + cov_mat(mat) + dim[order] * beta0/(dim[order] + beta0) * np.outer(mat_bar - mu0, mat_bar - mu0)) var_Lambda_hyper = wishart(df = dim[order] + nu0, scale = var_W_hyper, seed = None).rvs() var_mu_hyper = mvnrnd(var_mu_hyper, inv((dim[order] + beta0) * var_Lambda_hyper)) if order == 0: var1 = kr_prod(X, V).T elif order == 1: var1 = kr_prod(X, U).T var2 = kr_prod(var1, var1) var3 = (tau * np.matmul(var2, ten2mat(binary_tensor, order).T).reshape([rank, rank, dim[order]]) + np.dstack([var_Lambda_hyper] * dim[order])) var4 = (tau * np.matmul(var1, ten2mat(sparse_tensor, order).T) + np.dstack([np.matmul(var_Lambda_hyper, 
var_mu_hyper)] * dim[order])[0, :, :]) for i in range(dim[order]): inv_var_Lambda = inv(var3[ :, :, i]) vec = mvnrnd(np.matmul(inv_var_Lambda, var4[:, i]), inv_var_Lambda) if order == 0: U[i, :] = vec.copy() elif order == 1: V[i, :] = vec.copy() Z_mat = X[np.max(time_lags) : dim3, :] Q_mat = np.zeros((dim3 - np.max(time_lags), rank * d)) for t in range(np.max(time_lags), dim3): Q_mat[t - np.max(time_lags), :] = X[t - time_lags, :].reshape([rank * d]) var_Psi = inv(inv(Psi0) + np.matmul(Q_mat.T, Q_mat)) var_M = np.matmul(var_Psi, np.matmul(inv(Psi0), M0) + np.matmul(Q_mat.T, Z_mat)) var_S = (S0 + np.matmul(Z_mat.T, Z_mat) + np.matmul(np.matmul(M0.T, inv(Psi0)), M0) - np.matmul(np.matmul(var_M.T, inv(var_Psi)), var_M)) Sigma = invwishart(df = nu0 + dim3 - np.max(time_lags), scale = var_S, seed = None).rvs() A = mat2ten(mnrnd(var_M, var_Psi, Sigma).T, np.array([rank, rank, d]), 0) if iters + 1 > maxiter1 - maxiter2: A_plus += A Lambda_x = inv(Sigma) var1 = kr_prod(V, U).T var2 = kr_prod(var1, var1) var3 = (tau * np.matmul(var2, ten2mat(binary_tensor, 2).T).reshape([rank, rank, dim3]) + np.dstack([Lambda_x] * dim3)) var4 = tau * np.matmul(var1, ten2mat(sparse_tensor, 2).T) for t in range(dim3): Mt = np.zeros((rank, rank)) Nt = np.zeros(rank) if t < np.max(time_lags): Qt = np.zeros(rank) else: Qt = np.matmul(Lambda_x, np.matmul(ten2mat(A, 0), X[t - time_lags, :].reshape([rank * d]))) if t < dim3 - np.min(time_lags): if t >= np.max(time_lags) and t < dim3 - np.max(time_lags): index = list(range(0, d)) else: index = list(np.where((t + time_lags >= np.max(time_lags)) & (t + time_lags < dim3)))[0] for k in index: Ak = A[:, :, k] Mt += np.matmul(np.matmul(Ak.T, Lambda_x), Ak) A0 = A.copy() A0[:, :, k] = 0 var5 = (X[t + time_lags[k], :] - np.matmul(ten2mat(A0, 0), X[t + time_lags[k] - time_lags, :].reshape([rank * d]))) Nt += np.matmul(np.matmul(Ak.T, Lambda_x), var5) var_mu = var4[:, t] + Nt + Qt if t < np.max(time_lags): inv_var_Lambda = inv(var3[:, :, t] + Mt - Lambda_x + 
np.eye(rank)) else: inv_var_Lambda = inv(var3[:, :, t] + Mt) X[t, :] = mvnrnd(np.matmul(inv_var_Lambda, var_mu), inv_var_Lambda) if iters + 1 > maxiter1 - maxiter2: U_plus += U V_plus += V X_plus += X tensor_hat = cp_combine(U, V, X) if iters + 1 > maxiter1 - maxiter2: X_new[0 : dim3, :] = X.copy() X_new[dim3, :] = np.matmul(ten2mat(A, 0), X_new[dim3 - time_lags, :].reshape([rank * d])) X_new_plus += X_new tensor_hat_plus += cp_combine(U, V, X_new) tau = np.random.gamma(alpha + 0.5 * sparse_tensor[position].shape[0], 1/(beta + 0.5 * np.sum((sparse_tensor - tensor_hat)[position] ** 2))) rmse = np.sqrt(np.sum((dense_tensor[pos] - tensor_hat[pos]) ** 2)/dense_tensor[pos].shape[0]) if (iters + 1) % 200 == 0 and iters < maxiter1 - maxiter2: print('Iter: {}'.format(iters + 1)) print('RMSE: {:.6}'.format(rmse)) print() U = U_plus/maxiter2 V = V_plus/maxiter2 X = X_plus/maxiter2 X_new = X_new_plus/maxiter2 A = A_plus/maxiter2 tensor_hat = tensor_hat_plus/maxiter2 if maxiter1 >= 100: final_mape = np.sum(np.abs(dense_tensor[pos] - tensor_hat[pos])/dense_tensor[pos])/dense_tensor[pos].shape[0] final_rmse = np.sqrt(np.sum((dense_tensor[pos] - tensor_hat[pos]) ** 2)/dense_tensor[pos].shape[0]) print('Imputation MAPE: {:.6}'.format(final_mape)) print('Imputation RMSE: {:.6}'.format(final_rmse)) print() return tensor_hat, U, V, X_new, A # **How to transform a data set into something we can use for missing data imputation?** # # + import scipy.io tensor = scipy.io.loadmat('../NYC-data-set/tensor.mat') dense_tensor = tensor['tensor'] rm_tensor = scipy.io.loadmat('../NYC-data-set/rm_tensor.mat') rm_tensor = rm_tensor['rm_tensor'] nm_tensor = scipy.io.loadmat('../NYC-data-set/nm_tensor.mat') nm_tensor = nm_tensor['nm_tensor'] missing_rate = 0.1 # ============================================================================= ### Random missing (RM) scenario ### Set the RM scenario by: binary_tensor = np.round(rm_tensor + 0.5 - missing_rate) # 
============================================================================= sparse_tensor = np.multiply(dense_tensor, binary_tensor) # - # **Question**: Given only the partially observed data $\mathcal{Y}\in\mathbb{R}^{m\times n\times f}$, how can we impute the unknown missing values? # # The main influential factors for such imputation model are: # # - `rank`. # # - `maxiter1`. # # - `maxiter2`. import time start = time.time() dim1, dim2, dim3 = sparse_tensor.shape rank = 30 time_lags = np.array([1, 2, 24]) d = time_lags.shape[0] init = {"U": 0.1 * np.random.rand(dim1, rank), "V": 0.1 * np.random.rand(dim2, rank), "X": 0.1 * np.random.rand(dim3, rank)} maxiter1 = 1100 maxiter2 = 100 BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, maxiter1, maxiter2) end = time.time() print('Running time: %d seconds'%(end - start)) # + import scipy.io tensor = scipy.io.loadmat('../NYC-data-set/tensor.mat') dense_tensor = tensor['tensor'] rm_tensor = scipy.io.loadmat('../NYC-data-set/rm_tensor.mat') rm_tensor = rm_tensor['rm_tensor'] nm_tensor = scipy.io.loadmat('../NYC-data-set/nm_tensor.mat') nm_tensor = nm_tensor['nm_tensor'] missing_rate = 0.3 # ============================================================================= ### Random missing (RM) scenario ### Set the RM scenario by: binary_tensor = np.round(rm_tensor + 0.5 - missing_rate) # ============================================================================= sparse_tensor = np.multiply(dense_tensor, binary_tensor) # - import time start = time.time() dim1, dim2, dim3 = sparse_tensor.shape rank = 30 time_lags = np.array([1, 2, 24]) d = time_lags.shape[0] init = {"U": 0.1 * np.random.rand(dim1, rank), "V": 0.1 * np.random.rand(dim2, rank), "X": 0.1 * np.random.rand(dim3, rank), "theta": 0.1 * np.random.rand(d, rank)} maxiter1 = 1100 maxiter2 = 100 BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, maxiter1, maxiter2) end = time.time() print('Running time: %d seconds'%(end - start)) # + import scipy.io 
tensor = scipy.io.loadmat('../NYC-data-set/tensor.mat') dense_tensor = tensor['tensor'] rm_tensor = scipy.io.loadmat('../NYC-data-set/rm_tensor.mat') rm_tensor = rm_tensor['rm_tensor'] nm_tensor = scipy.io.loadmat('../NYC-data-set/nm_tensor.mat') nm_tensor = nm_tensor['nm_tensor'] missing_rate = 0.1 # ============================================================================= ### Non-random missing (NM) scenario ### Set the NM scenario by: binary_tensor = np.zeros(dense_tensor.shape) for i1 in range(dense_tensor.shape[0]): for i2 in range(dense_tensor.shape[1]): for i3 in range(61): binary_tensor[i1, i2, i3 * 24 : (i3 + 1) * 24] = np.round(nm_tensor[i1, i2, i3] + 0.5 - missing_rate) # ============================================================================= sparse_tensor = np.multiply(dense_tensor, binary_tensor) # - import time start = time.time() dim1, dim2, dim3 = sparse_tensor.shape rank = 30 time_lags = np.array([1, 2, 24]) d = time_lags.shape[0] init = {"U": 0.1 * np.random.rand(dim1, rank), "V": 0.1 * np.random.rand(dim2, rank), "X": 0.1 * np.random.rand(dim3, rank), "theta": 0.1 * np.random.rand(d, rank)} maxiter1 = 1100 maxiter2 = 100 BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, maxiter1, maxiter2) end = time.time() print('Running time: %d seconds'%(end - start)) # + import scipy.io tensor = scipy.io.loadmat('../NYC-data-set/tensor.mat') dense_tensor = tensor['tensor'] rm_tensor = scipy.io.loadmat('../NYC-data-set/rm_tensor.mat') rm_tensor = rm_tensor['rm_tensor'] nm_tensor = scipy.io.loadmat('../NYC-data-set/nm_tensor.mat') nm_tensor = nm_tensor['nm_tensor'] missing_rate = 0.3 # ============================================================================= ### Non-random missing (NM) scenario ### Set the NM scenario by: binary_tensor = np.zeros(dense_tensor.shape) for i1 in range(dense_tensor.shape[0]): for i2 in range(dense_tensor.shape[1]): for i3 in range(61): binary_tensor[i1, i2, i3 * 24 : (i3 + 1) * 24] = np.round(nm_tensor[i1, i2, 
i3] + 0.5 - missing_rate) # ============================================================================= sparse_tensor = np.multiply(dense_tensor, binary_tensor) # - import time start = time.time() dim1, dim2, dim3 = sparse_tensor.shape rank = 30 time_lags = np.array([1, 2, 24]) d = time_lags.shape[0] init = {"U": 0.1 * np.random.rand(dim1, rank), "V": 0.1 * np.random.rand(dim2, rank), "X": 0.1 * np.random.rand(dim3, rank), "theta": 0.1 * np.random.rand(d, rank)} maxiter1 = 1100 maxiter2 = 100 BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, maxiter1, maxiter2) end = time.time() print('Running time: %d seconds'%(end - start)) # **Experiment results** of missing data imputation using Bayesian Temporal Tensor Factorization (BTTF): # # | scenario |`rank`|`maxiter1`|`maxiter2`| mape | rmse | # |:----------|-----:|---------:|---------:|-----------:|----------:| # |**0.1, RM**| 30 | 1100 | 100 | **0.5198** | **4.66**| # |**0.3, RM**| 30 | 1100 | 100 | **0.5178** | **4.77**| # |**0.1, NM**| 30 | 1100 | 100 | **0.5265** | **4.75**| # |**0.3, NM**| 30 | 1100 | 100 | **0.5271** | **4.90**|
experiments/Imputation-BTTF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="nknCkGvGunbc" # Application # + id="OAkOLzS9qgxc" executionInfo={"status": "ok", "timestamp": 1628777082660, "user_tz": -120, "elapsed": 527, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} #Choose one picture and execute notebook #from drive img_path= "/content/drive/MyDrive/work/Oliv/images_clean/Hygrocybe/34930.jpg" #or from internet img_path= "https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Cortinarius_purpurascens_65102.jpg/1024px-Cortinarius_purpurascens_65102.jpg" #img_path="https://static.aujardin.info/cache/th/champignons/lactarius-sanguifluus-600x450.jpg" #img_path = "https://www.pharmanatur.com/Mycologie/Gymnopus%20hariolorum%205.jpg" #img_path = "https://magazine.pretapousser.fr/wp-content/uploads/2016/09/000001-222.jpg" #un genre qu'il ne connait pas : #img_path = "https://static.aujardin.info/cache/th/champignons/psathyrella-corrugis-600x450.jpg" img_path= "/content/drive/MyDrive/work/Oliv/images_clean/Amanita/7162.jpg" # + id="fXQUq0MMvs_f" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628776513490, "user_tz": -120, "elapsed": 22236, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} outputId="c3339cd1-f0e7-43c5-d36f-98e0bd15f83a" import os import numpy as np import pandas as pd import tensorflow as tf from skimage import io from numpy import dstack from google.colab import drive import joblib #to load and save sklearn models import matplotlib.pyplot as plt import seaborn as sns GDRIVE_MOUNT_POINT = '/content/drive' WORK_DIR = "/content/drive/MyDrive/work/Oliv" MODEL_DIR = "/content/drive/MyDrive/work/dad/output/classifier/multiclass/10/" if not os.path.isdir(GDRIVE_MOUNT_POINT): 
drive.mount(GDRIVE_MOUNT_POINT) # + id="-bnoQe9un4Ev" executionInfo={"status": "ok", "timestamp": 1628776705140, "user_tz": -120, "elapsed": 321, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} list_genus=['Agaricus', 'Amanita', 'Armillaria', 'Cortinarius', 'Entoloma', 'Gymnopus', 'Hygrocybe', 'Lactarius', 'Marasmius', 'Russula'] # + id="qzrsxy9Q22oP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628776690933, "user_tz": -120, "elapsed": 177451, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} outputId="f53d3a27-e02e-44cd-881f-55b9ba04bf4c" #Models need to be loaded def get_img_array(img_path, size): # `img` is a PIL image of size XxX if "https://" in img_path: img= io.imread(img_path) io.imsave(WORK_DIR + "/images_test/test.jpg" , img) img_path = WORK_DIR + "/images_test/test.jpg" img = tf.keras.preprocessing.image.load_img(img_path, target_size=size) #img = tf.keras.preprocessing.image.load_img(img, target_size=size) # `array` is a float32 Numpy array of shape (X, X, 3) array = tf.keras.preprocessing.image.img_to_array(img) # We add a dimension to transform our array into a "batch" # of size (1, X, X, 3) array = np.expand_dims(array, axis=0) return array def load_all_models(): all_models = {} filenames = [] for root, dirs, files in os.walk(MODEL_DIR): if ("0002/train/0001" in root): for i in files: full_name= os.path.join(root, i) root, extension = os.path.splitext(full_name) if extension==".h5": filenames.append(full_name) for file in filenames: if "xception" not in file: model = tf.keras.models.load_model(file) # add to list of members name = file.replace(MODEL_DIR, "") name = name.replace("/0002/train/0001/model/best_model.h5" , "") all_models[name]=model print('>Loading model %s' % file) return all_models # load all models members = load_all_models() print('Loaded %d models' % len(members)) filename = 
'ensemble_model.sav' ensemble_model = joblib.load(WORK_DIR +"/saved_model/"+filename) print("Loaded Ensemble model") # + id="EV7fkPwgrKyF" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1628777105596, "user_tz": -120, "elapsed": 16913, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12626355061292107466"}} outputId="d05483b2-2028-41f7-bfad-e061451b992a" #Make prediction for one picture stackX = None df_list_genus = pd.DataFrame(list_genus, columns=["Genus"]) results = pd.DataFrame(data=[], index= list_genus) for name, model in members.items(): print("\nAnalyzing ", name) IMG_SIZE= model.input.get_shape().as_list()[1:3] img_array = get_img_array(img_path, size=IMG_SIZE) if "vgg16" in name: img_array = tf.keras.applications.vgg16.preprocess_input(img_array) elif "vgg19" in name: img_array = tf.keras.applications.vgg19.preprocess_input(img_array) elif "xception" in name: img_array = tf.keras.applications.xception.preprocess_input(img_array) elif "mobilenetv2" in name: img_array = tf.keras.applications.mobilenet_v2.preprocess_input(img_array) elif "resnet50" in name: img_array = tf.keras.applications.resnet50.preprocess_input(img_array) # make prediction print("Making predictions for " , name) yhat = model.predict(img_array, verbose=0) pred_ind = yhat.argmax(axis=1) df_probs= pd.DataFrame(yhat[0], columns=["Prob"]) #style.format({'Prob': '{:,.2%}'.format}) print(pd.concat([df_list_genus, df_probs] , axis=1).sort_values(by =["Prob"], axis =0, ascending=False).head(3).T) df_probs= pd.DataFrame(yhat[0], columns=[name], index=list_genus) results = pd.concat([results, df_probs] , axis=1) print("\nPrediction : ", list_genus[pred_ind[0]]) # stack predictions into [rows, members, probabilities] if stackX is None: stackX = yhat else: stackX = dstack((stackX, yhat)) del model # flatten predictions to [rows, members x probabilities] stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) 
print("\n") print("Analyzing meta-learner...") df_probs= pd.DataFrame(ensemble_model.predict_proba(stackX)[0], columns=["Prob"]) #style.format({'Prob': '{:,.2%}'.format}) print(pd.concat([df_list_genus, df_probs] , axis=1).sort_values(by =["Prob"], axis =0, ascending=False).head(3).T) pred = ensemble_model.predict(stackX) print("\n>>>>Prediction finale : ", list_genus[pred[0]]) df_probs= pd.DataFrame(ensemble_model.predict_proba(stackX)[0], columns=["Ensemble model"] , index=list_genus) results = pd.concat([results, df_probs] , axis=1) results #Temps total pour la prédiction une fois les modèles chargés : 5 secondes # + id="PAF8M6LqizfV" colab={"base_uri": "https://localhost:8080/", "height": 374} executionInfo={"status": "ok", "timestamp": 1628777106581, "user_tz": -120, "elapsed": 1023, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} outputId="cdcf6b7e-0370-4202-9268-ea2086255990" #plot results plt.figure(figsize=(6,6)) plt.plot(np.max(df_probs), pred[0] + 1,color='green', marker='o' , markersize=12) plt.boxplot(results.drop(["Ensemble model"], axis=1), labels=results.index , vert=False); # + id="oEBSdl6gi5Hb" colab={"base_uri": "https://localhost:8080/", "height": 248} executionInfo={"status": "ok", "timestamp": 1628777106584, "user_tz": -120, "elapsed": 29, "user": {"displayName": "Mushroom Recognition", "photoUrl": "", "userId": "12626355061292107466"}} outputId="9773c444-566a-4d4f-b92e-606accc95be1" #Display mushroom img= io.imread(img_path) plt.imshow(img) plt.axis("off") plt.show;
src/notebooks/06_demo/Application.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd from pprint import pprint from decision_tree_functions import decision_tree_algorithm, make_predictions, calculate_accuracy from helper_functions import generate_data, create_plot, train_test_split # - # # 1. Post-Pruning # + np.random.seed(0) df_train = generate_data(n=300, specific_outliers=[(5.4, 8.4)]) tree = decision_tree_algorithm(df_train, ml_task="classification", max_depth=10) create_plot(df_train, tree, title="Training Data") np.random.seed(7) df_val = generate_data(n=300) create_plot(df_val, tree, title="Validation Data") # - tree = {'x <= 5.0': [True, False]} def post_pruning(tree, df_train, df_val): question = list(tree.keys())[0] yes_answer, no_answer = tree[question] # base case if not isinstance(yes_answer, dict) and not isinstance(no_answer, dict): leaf = df_train.label.value_counts().index[0] errors_leaf = sum(df_val.label != leaf) errors_decision_node = sum(df_val.label != make_predictions(df_val, tree)) if errors_leaf <= errors_decision_node: return leaf else: return tree # recursive part else: return tree
Decision-Tree-from-Scratch/notebooks/Video 12 - Post Pruning 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K- Nearest Neighbour # ### 1. Importing Libraries import numpy as np import math import matplotlib.pyplot as plt import pandas as pd import os from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn import metrics from sklearn.metrics import confusion_matrix from collections import defaultdict # ### 2. Data Preprocessing # + pima = pd.read_csv("diabetes.csv") pima.head() # + #normalizing the dataset scalar = preprocessing.MinMaxScaler() pima = scalar.fit_transform(pima) #split dataset in features and target variable X = pima[:,:8] y = pima[:, 8] X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=42) print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape) # - # finding the value of k k = int(math.sqrt(len(Y_test))) print(k) # it's an odd number so we will be taking this as k for now #plotting the data for one feature plt.scatter(X_train[:,1], X_train[:, 2], c = Y_train) # ### 3. 
Required functions # + def euclDist(X_train, Y_train, test_pt): i = 0 dist_list = [] for el in X_train: d = np.sum((test_pt - el)**2) label = Y_train[i] dist_list.append((d, label)) i = i + 1 return dist_list def score(Y_pred, Y_test): correct_pred = np.sum(Y_pred == Y_test) return correct_pred / Y_pred.shape[0] def KNNClassifier(X_train, Y_train, X_test, Y_test, k = 5): #defialt value of k is 5 Y_pred = [] for test_pt in X_test: dist_list = euclDist(X_train, Y_train, X_test) dist_list.sort() f = defaultdict(int) for j in range(k): f[dist_list[j][1]] = f[dist_list[j][1]] + 1 maxVal = - 1000000000 l = -1 for key in f: if(f[key] > maxVal): maxVal = f[key] l = key Y_pred.append(l) return Y_pred # - Y_pred = KNNClassifier(X_train, Y_train, X_test, Y_test, 13) print("The accuracy of the model is : {0}".format(score(np.array(Y_pred), Y_test)))
sem 5/machine learning/ml practicals/KNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + nbpresent={"id": "00d398b0-b3c8-4bcc-a7e9-02e2a71873d7"} import numpy as np import os from codeStore import support_fun as spf import importlib # + nbpresent={"id": "4547a67b-384b-439e-9177-6184fe7da51b"} # case 1, ecoli (head Force) model in finite pipe. ellipse_centerx_list = [0.75, 0.5, 0] rot_theta_list = np.linspace(0, 2, 11) # true theta=rot_theta_list*np.pi rs1_list = (0.1, 0.2, 0.3) PoiseuilleStrength_list = np.linspace(-0.5, 0.5, 11) # PoiseuilleStrength_list = (0, ) job_dir = 'case1a' if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file for ellipse_centerx_i in ellipse_centerx_list: for rot_theta_i in rot_theta_list: for rs1_i in rs1_list: if rs1_i + ellipse_centerx_i < 1: job_name = 'headF_x%.2f_rot%.2f_rs1%.2f' % (ellipse_centerx_i, rot_theta_i, rs1_i) t_name = os.path.join(job_dir, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: spf.write_pbs_head(fpbs, job_name) for PoiseuilleStrength_i in PoiseuilleStrength_list: output_name = '%s_P%.2f' % (job_name, PoiseuilleStrength_i) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../head_Force_in_pipe.py ') fpbs.write(' -f %s ' % output_name) fpbs.write(' -sm %s ' % 'lg_rs') fpbs.write(' -legendre_m %d ' % 3) fpbs.write(' -legendre_k %d ' % 2) fpbs.write(' -epsilon %f ' % 3) fpbs.write(' -PoiseuilleStrength %f ' % PoiseuilleStrength_i) fpbs.write(' -ecoli_tail_strength %f ' % 1) fpbs.write(' -rs1 %f ' % rs1_i) fpbs.write(' -rs2 %f ' % 0.1) fpbs.write(' -ds %f ' % 0.01) fpbs.write(' -ellipse_centerx %f ' % ellipse_centerx_i) fpbs.write(' -ellipse_centery %f ' % 0) fpbs.write(' -ellipse_centerz %f ' % 0) fpbs.write(' -rot_theta %f ' % rot_theta_i) fpbs.write(' -finite_pipe_length %f ' % 
10) fpbs.write(' -finite_pipe_cover %d ' % 1) fpbs.write(' -finite_pipe_ntheta %d ' % 100) fpbs.write(' -vtk_matname %s ' % 'pipe_20181209') fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' > %s.txt \n\n' % output_name) # write to .sh file frun.write('qsub %s.pbs\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + nbpresent={"id": "5956ed1f-3047-4223-94a2-ff01b3f1044a"} # case 2, ecoli (head Force) model in finite pipe. motion loop ellipse_centerx_list = (0.75, 0.5, 0) rot_theta_list = (0, 1/3, ) # true theta=rot_theta_list*np.pi rs1_list = (0.1, ) PoiseuilleStrength_list = (0, 0.05) # PoiseuilleStrength_list = (0, ) PWD = os.getcwd() job_dir = 'case2a' if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for ellipse_centerx_i in ellipse_centerx_list: for rs1_i in rs1_list: if rs1_i + ellipse_centerx_i < 1: for rot_theta_i in rot_theta_list: for PoiseuilleStrength_i in PoiseuilleStrength_list: job_name = 'headF_x%.2f_rot%.2f_rs1%.2f_P%.2f' % \ (ellipse_centerx_i, rot_theta_i, rs1_i, PoiseuilleStrength_i) print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: spf.write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_head_Force_pipe.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -sm %s ' % 'lg_rs') fpbs.write(' -legendre_m %d ' % 3) fpbs.write(' -legendre_k %d ' % 2) fpbs.write(' -epsilon %f ' % 3) fpbs.write(' -PoiseuilleStrength %f ' % PoiseuilleStrength_i) fpbs.write(' -ecoli_tail_strength %f ' % 1) fpbs.write(' -rs1 %f ' % rs1_i) fpbs.write(' -rs2 %f ' % 0.1) fpbs.write(' -ds %f ' % 0.01) fpbs.write(' -ellipse_centerx %f ' % ellipse_centerx_i) fpbs.write(' -ellipse_centery %f ' % 0) fpbs.write(' -ellipse_centerz %f ' % 0) 
fpbs.write(' -rot_theta %f ' % rot_theta_i) fpbs.write(' -finite_pipe_length %f ' % 10) fpbs.write(' -finite_pipe_cover %d ' % 1) fpbs.write(' -finite_pipe_ntheta %d ' % 100) fpbs.write(' -vtk_matname %s ' % 'pipe_20181209') fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' -max_iter %d ' % 500) fpbs.write(' -eval_dt %f ' % 0.01) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + nbpresent={"id": "6301a16b-8680-4978-80ec-195a1badf3f3"} # ecoli_shear1, ecoli model in infinite shear flow. motion ecoli # eval_dt_list = [0.1, 0.05, 0.01,] # update_order_list = [1, 2, 3, 4, 5] # # eval_dt_fct_list = [] # max_iter = 500 # job_dir = 'ecoli_shear1c' # write_pbs_head = spf.write_pbs_head # eval_dt_list = [1, 10, 100] # update_order_list = [1, 2, 3, 4, 5] # # eval_dt_fct_list = [] # max_iter = 500 # job_dir = 'ecoli_shear1d' # write_pbs_head = spf.write_pbs_head # eval_dt_list = [0.5, 2, 4, 8] # update_order_list = [1, 2, 3, 4, 5] # # eval_dt_fct_list = [] # max_iter = 500 # job_dir = 'ecoli_shear1e' # write_pbs_head = spf.write_pbs_head eval_dt_list = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05] update_order_list = [1, 2, 3, 4, 5] # eval_dt_fct_list = [] max_iter = 500 job_dir = 'ecoli_shear1f' write_pbs_head = spf.write_pbs_head PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for eval_dt_i in eval_dt_list: for update_order_i in update_order_list: job_name = 'eq_dt%0.3f_O%1d' % (eval_dt_i, update_order_i) print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' 
../../motion_ecoli.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -sm %s ' % 'pf') fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % 0.1) fpbs.write(' -rh2 %f ' % 0.03) fpbs.write(' -ch %f ' % 3) fpbs.write(' -nth %d ' % 20) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.07) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 1) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order_i) fpbs.write(' -eval_dt %f ' % eval_dt_i) fpbs.write(' -planeShearRatex %f ' % 0.003) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # head_shear, head-force model in infinite shear flow. 
motion ecoli # update_order_list = [1, ] # max_iter = 10001 # update_fun = 'Adams_Bashforth_Methods' # job_dir = 'head_shear1a' # rs2_list = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1, 3, 5, 7, 9]) # ds_list = np.array([0.012, 0.022, 0.030, 0.035, 0.042, 0.05, 0.10, 0.17, 0.23, 0.30]) # eval_dt_list = np.array([1, 0.15, 0.10, 0.03, 0.06, 0.06, 0.03, 0.03, 0.03, 0.03]) * 0.1 # z_list = np.linspace(-3, 3, 5) # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # update_order_list = [1, ] # max_iter = 10001 # update_fun = 'Adams_Bashforth_Methods' # job_dir = 'head_shear1b' # rs2_list = np.array([3, 5, 7, 9]) # ds_list = np.array([0.10, 0.17, 0.23, 0.30]) # eval_dt_list = np.array([0.15, 0.24, 0.3 , 0.45]) * 0.1 # z_list = np.linspace(-3, 3, 5) # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # update_order_list = [1, 2, 3, 4, 5] # max_iter = 10001 # update_fun = 'Adams_Bashforth_Methods' # job_dir = 'head_shear1c' # rs2_list = np.array([0.1, 9]) # ds_list = np.array([0.012, 0.30]) # eval_dt_list = np.array([1, 0.45]) * 0.1 # z_list = [3, ] # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for update_order_i in update_order_list: for rs2_i, dsi, eval_dt_i in zip(rs2_list, ds_list, eval_dt_list): for zi in z_list: job_name = 'eq_a%05.2f_o%1d' % (rs2_i, update_order_i) t_path = job_dir t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../motion_head_Force.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -sm %s ' % 'lg_rs') fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rs1 %f ' % 1) fpbs.write(' -rs2 %f ' % rs2_i) fpbs.write(' -ds %f ' % dsi) fpbs.write(' -es %f 
' % -1) fpbs.write(' -ellipse_centerz %f ' % zi) fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order_i) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt_i) fpbs.write(' -ecoli_tail_strength %f ' % 0.0) fpbs.write(' -planeShearRatex %f ' % 1) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # random (or given) ini direction P of ellipse update_order_list = [1, ] max_iter = 20001 update_fun = 'Adams_Bashforth_Methods' job_dir = 'head_shear2' z_list = np.zeros(1) importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb norm_theta_list = np.random.sample(5) * np.pi norm_phi_list = np.random.sample(5) * 2 * np.pi rs2_i = np.array([1 / 3]) dsi = np.array([0.03]) eval_dt_i = np.array([0.03]) zi = 0 PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for norm_theta, norm_phi in zip(norm_theta_list, norm_phi_list): job_name = 'eq_theta%5.3f_phi%5.3f' % (norm_theta, norm_phi) t_path = job_dir t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../motion_head_Force.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -sm %s ' % 'lg_rs') fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rs1 %f ' % 1) fpbs.write(' -rs2 %f ' % rs2_i) fpbs.write(' -ds %f ' % dsi) fpbs.write(' -es %f ' % -1) fpbs.write(' -ellipse_centerz %f ' % zi) fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -zoom_factor 
%f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order_i) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt_i) fpbs.write(' -ecoli_tail_strength %f ' % 0.0) fpbs.write(' -planeShearRatex %f ' % 1) fpbs.write(' -norm_theta %f ' % norm_theta) fpbs.write(' -norm_phi %f ' % norm_phi) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # head_shear, head-velocity model in infinite shear flow. motion ecoli # two nondimentional parameters: /alpha=rs_1/rs_1, /kappa=|v|/(z*/tau) update_order_list = [1, ] max_iter = 3001 update_fun = 'Adams_Moulton_Methods' job_dir = 'head_shear_velocity1a' rs2_list = np.array([0.1, 1, 9]) ds_list = np.array([0.012, 0.1, 0.3]) eval_dt_list = np.array([1, 0.06, 0.45]) * 0.1 z_list = np.linspace(-1, 1, 5) planeShearRatex = 2 ecoli_velocity = 1 importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for update_order_i in update_order_list: for rs2_i, dsi, eval_dt_i in zip(rs2_list, ds_list, eval_dt_list): for zi in z_list: job_name = 'eq_a%05.2f_z%05.2f' % (rs2_i, zi) t_path = job_dir t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../motion_head_speed.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -sm %s ' % 'lg_rs') fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rs1 %f ' % 1) fpbs.write(' -rs2 %f ' % rs2_i) fpbs.write(' -ds %f ' % dsi) fpbs.write(' -es %f ' % -1) fpbs.write(' -ellipse_centerz %f ' % zi) fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) fpbs.write(' -planeShearRatex %f ' 
% planeShearRatex) fpbs.write(' -ksp_max_it %d ' % 500) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order_i) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt_i) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # case motion_ecoli_speed_dbg planeShearRatex_list = np.linspace(0, 9, 10) job_dir = 'motion_ecoli_speed_dbg1' importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for planeShearRatex in planeShearRatex_list: job_name = 'dbg_tau%05.2f' % planeShearRatex t_path = job_dir t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('time mpirun -n 24 python ') fpbs.write(' ../motion_ecoli_speed_dbg.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % 0.1) fpbs.write(' -rh2 %f ' % 0.03) fpbs.write(' -ch %f ' % 3) fpbs.write(' -nth %d ' % 15) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.1) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.500000) fpbs.write(' -ksp_max_it %d ' % 100) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 1) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % 'pf') 
fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % 1) fpbs.write(' -update_order %d ' % 1) fpbs.write(' -update_fun %s ' % 'Adams_Moulton_Methods') fpbs.write(' -eval_dt %f ' % 0.01) fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -ecoli_velocity %f ' % 1) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # # case motion_ecoli_speed, # # planeShearRatex_list = np.array((0, 1/3, 1/2, 1, 2, 3, 10)) # # eval_dt_list = np.array((0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05)) # # max_iter = 100 # # update_order = 3 # # ksp_max_it = 200 # # update_fun = 'Adams_Moulton_Methods' # # ecoli_velocity = 1 # # job_dir = 'motion_ecoli_speed1' # # importlib.reload(spf) # # write_pbs_head = spf.write_pbs_head_newturb # PWD = os.getcwd() # if not os.path.exists(job_dir): # os.makedirs(job_dir) # t_name = os.path.join(job_dir, 'run.sh') # n_pbs = 0 # with open(t_name, 'w') as frun: # # create .pbs file # frun.write('t_dir=$PWD \n') # for planeShearRatex, eval_dt in zip(planeShearRatex_list, eval_dt_list): # job_name = 'dbg_tau%05.2f' % planeShearRatex # t_path = job_dir # t_name = os.path.join(t_path, '%s.pbs' % job_name) # with open(t_name, 'w') as fpbs: # write_pbs_head(fpbs, job_name) # fpbs.write('mpirun -n 24 python ') # fpbs.write(' ../motion_ecoli_speed.py ') # fpbs.write(' -f %s ' % job_name) # fpbs.write(' -pickProblem %d ' % 0) # fpbs.write(' -save_singleEcoli_vtk %d ' % 0) # fpbs.write(' -rh1 %f ' % 0.1) # fpbs.write(' -rh2 %f ' % 0.03) # fpbs.write(' -ch %f ' % 3) # fpbs.write(' -nth %d ' % 15) # fpbs.write(' -eh %f ' % -1) # fpbs.write(' -ph %f ' % (2/3)) # fpbs.write(' -hfct %f ' % 1) # fpbs.write(' -n_tail %d ' % 1) # fpbs.write(' -with_cover %d ' % 2) # fpbs.write(' -left_hand %d ' % 0) # fpbs.write(' -rs1 %f ' % 1.5) # fpbs.write(' -rs2 %f ' % 0.5) # fpbs.write(' -ds %f ' % 0.1) # fpbs.write(' -es %f ' % -1) # fpbs.write(' 
-with_T_geo %d ' % 0) # fpbs.write(' -dist_hs %f ' % 0.5) # fpbs.write(' -ksp_max_it %d ' % ksp_max_it) # fpbs.write(' -plot_geo %d ' % 0) # fpbs.write(' -rel_wsz %f ' % 0) # fpbs.write(' -rel_whz %f ' % 100) # fpbs.write(' -ffweight %f ' % 2) # fpbs.write(' -sm %s ' % 'pf') # fpbs.write(' -zoom_factor %f ' % 1) # fpbs.write(' -max_iter %d ' % max_iter) # fpbs.write(' -update_order %d ' % update_order) # fpbs.write(' -update_fun %s ' % update_fun) # fpbs.write(' -eval_dt %f ' % eval_dt) # fpbs.write(' -planeShearRatex %f ' % planeShearRatex) # fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) # fpbs.write(' > %s.txt \n\n' % job_name) # # write to .sh file # frun.write('qsub %s.pbs\n\n' % job_name) # n_pbs = n_pbs + 1 # frun.write('\n') # print('n_pbs = ', n_pbs) ##################################################################################################################################### # case motion_ecoli_speed, each case store in a independent folder. planeShearRatex_list = np.array((0, 1/3, 1/2, 1, 2, 3, 10)) eval_dt_list = np.array((0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02)) max_iter = 100 update_order = 3 ksp_max_it = 200 update_fun = 'Adams_Moulton_Methods' ecoli_velocity = 1 ch = 3 nth = 15 job_dir = 'motion_ecoli_speed2' importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for planeShearRatex, eval_dt in zip(planeShearRatex_list, eval_dt_list): job_name = 'dbg_tau%05.2f' % planeShearRatex print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_ecoli_speed.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' 
-pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % 0.1) fpbs.write(' -rh2 %f ' % 0.03) fpbs.write(' -ch %f ' % ch) fpbs.write(' -nth %d ' % nth) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.1) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % ksp_max_it) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 100) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % 'pf') fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt) fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # case motion_ecoli_torque, each case store in a independent folder. 
rh1 = 0.1 rh2 = 0.03 main_fun_noIter = 0 sm = 'pf' # planeShearRatex_list = np.array((0, 1/10, 1/5, 1/3, 1/2, 1, 2)) # eval_dt_list = np.array((3e-4, 3e-4, 3e-4, 3e-4, 3e-4, 3e-4, 3e-4)) # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ecoli_velocity = 1 # ch = 1 # nth = 15 # job_dir = 'motion_ecoli_torque1' # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # planeShearRatex_list = np.array((0, 1/10, 1/5, 1/3, 1/2, 1, 2)) # eval_dt_list = np.array((6e-4, 6e-4, 6e-4, 6e-4, 6e-4, 6e-4, 6e-4)) # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ecoli_velocity = 1 # ch = 3 # nth = 15 # job_dir = 'motion_ecoli_torque2' # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # planeShearRatex_list = np.array((0, 1/10, 1/5, 1/3, 1/2, 1, 2, 10)) # eval_dt_list = np.ones_like(planeShearRatex_list) * 0.002 # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ecoli_velocity = 1 # ch = 1 # nth = 15 # job_dir = 'motion_ecoli_torque3' # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # planeShearRatex_list = np.array((0, 1/10, 1/5, 1/3, 1/2, 1, 2, 10)) # eval_dt_list = np.ones_like(planeShearRatex_list) * 0.004 # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ecoli_velocity = 1 # ch = 3 # nth = 15 # job_dir = 'motion_ecoli_torque4' # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb # planeShearRatex_list = np.array((0, 1/10, 1/5, 1/3, 1/2, 1, 2, 10)) # eval_dt_list = np.ones_like(planeShearRatex_list) * 0.004 # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ecoli_velocity = 1 # ch = 3 # nth = 15 # rh1 = 0.3 # rh2 = 0.1 # job_dir = 'motion_ecoli_torque5' # main_fun_noIter = 1 # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head_newturb planeShearRatex_list = np.array((0, 
1/10, 1/5, 1/3, 1/2, 1, 2, 10)) eval_dt_list = np.ones_like(planeShearRatex_list) * 0.007 max_iter = 1e6 update_order = 3 ksp_max_it = 300 update_fun = 'Adams_Moulton_Methods' ecoli_velocity = 1 ch = 3 nth = 15 rh1 = 0.3 rh2 = 0.1 job_dir = 'motion_ecoli_torque6' main_fun_noIter = 1 importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for planeShearRatex, eval_dt in zip(planeShearRatex_list, eval_dt_list): job_name = 'dbg_tau%05.2f' % planeShearRatex print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_ecoli_torque.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % rh1) fpbs.write(' -rh2 %f ' % rh2) fpbs.write(' -ch %f ' % ch) fpbs.write(' -nth %d ' % nth) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.07) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % ksp_max_it) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 100) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % sm) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt) 
fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) fpbs.write(' -main_fun_noIter %d ' % main_fun_noIter) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # case motion_ecoli_passive, each case store in a independent folder. ecoli_name = 'ecoli_B01' planeShearRatex = 1 eval_dt = 0.007 max_iter = 1e6 update_order = 1 ksp_max_it = 300 update_fun = 'Adams_Moulton_Methods' ecoli_velocity = 0 ch = 3 nth = 20 rh1 = 0.1 rh2 = 0.03 job_dir = 'motion_ecoli_passive1' sm = 'pf' importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') job_name = '%s' % ecoli_name print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_ecoli_torque.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % rh1) fpbs.write(' -rh2 %f ' % rh2) fpbs.write(' -ch %f ' % ch) fpbs.write(' -nth %d ' % nth) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.07) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % ksp_max_it) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) 
fpbs.write(' -rel_whz %f ' % 100) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % sm) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt) fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) fpbs.write(' -passive_fun_noIter %d ' % 1) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # case motion_ecoli_torque, with different ini norm, each case store in a independent folder. ecoli_name = 'ecoli_B01' planeShearRatex = 1 eval_dt = 0.007 max_iter = 1e6 update_order = 1 ksp_max_it = 300 update_fun = 'Adams_Moulton_Methods' ecoli_velocity = 1 ch = 3 nth = 20 rh1 = 0.1 rh2 = 0.03 job_dir = 'motion_ecoli_torque7' sm = 'pf' importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb ini_rot_theta_list = np.array([0, 1/6, 1/6, 1/6, 2/6, 2/6, 2/6, 2/6]) * np.pi ini_rot_phi_list = np.array([0, 1/6, 2/6, 3/6, 1/6, 2/6, 3/6, 4/6]) * np.pi main_fun_noIter = 1 PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for ini_rot_theta, ini_rot_phi in zip(ini_rot_theta_list, ini_rot_phi_list): job_name = '%s_theta%4.2f_phi%4.2f' % (ecoli_name, ini_rot_theta, ini_rot_phi) print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_ecoli_torque.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk 
%d ' % 0) fpbs.write(' -rh1 %f ' % rh1) fpbs.write(' -rh2 %f ' % rh2) fpbs.write(' -ch %f ' % ch) fpbs.write(' -nth %d ' % nth) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % 1) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.07) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % ksp_max_it) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 100) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % sm) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt) fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -ecoli_velocity %f ' % ecoli_velocity) fpbs.write(' -main_fun_noIter %d ' % main_fun_noIter) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # # case motion_helix_passive, each case store in a independent folder. 
# rh1 = 0.1 # rh2 = 0.03 # main_fun_noIter = 0 # sm = 'pf' # planeShearRatex_list = np.array((0.1, 1, 10)) # eval_dt_list = np.ones_like(planeShearRatex_list) * 0.3 / planeShearRatex_list # max_iter = 1e6 # update_order = 3 # ksp_max_it = 300 # update_fun = 'Adams_Moulton_Methods' # ch = 3 # nth = 20 # rh1 = 0.3 # rh2 = 0.1 # job_dir = 'motion_helix_passive1' # main_fun_noIter = 1 # importlib.reload(spf) # write_pbs_head = spf.write_pbs_head # PWD = os.getcwd() # if not os.path.exists(job_dir): # os.makedirs(job_dir) # t_name = os.path.join(job_dir, 'run.sh') # n_pbs = 0 # with open(t_name, 'w') as frun: # # create .pbs file # frun.write('t_dir=$PWD \n') # for planeShearRatex, eval_dt in zip(planeShearRatex_list, eval_dt_list): # job_name = 'hlx_tau%05.2f' % planeShearRatex # print(job_name) # t_path = os.path.join(job_dir, job_name) # if not os.path.exists(t_path): # os.makedirs(t_path) # t_name = os.path.join(t_path, '%s.pbs' % job_name) # with open(t_name, 'w') as fpbs: # write_pbs_head(fpbs, job_name) # fpbs.write('mpirun -n 24 python ') # fpbs.write(' ../../motion_helix_passive.py ') # fpbs.write(' -f %s ' % job_name) # fpbs.write(' -pickProblem %d ' % 0) # fpbs.write(' -save_singleEcoli_vtk %d ' % 0) # fpbs.write(' -rh1 %f ' % rh1) # fpbs.write(' -rh2 %f ' % rh2) # fpbs.write(' -ch %f ' % ch) # fpbs.write(' -nth %d ' % nth) # fpbs.write(' -eh %f ' % -1) # fpbs.write(' -ph %f ' % (2/3)) # fpbs.write(' -hfct %f ' % 1) # fpbs.write(' -n_tail %d ' % 1) # fpbs.write(' -with_cover %d ' % 2) # fpbs.write(' -left_hand %d ' % 0) # fpbs.write(' -rs1 %f ' % 1.5) # fpbs.write(' -rs2 %f ' % 0.5) # fpbs.write(' -ds %f ' % 0.07) # fpbs.write(' -es %f ' % -1) # fpbs.write(' -with_T_geo %d ' % 0) # fpbs.write(' -dist_hs %f ' % 0.5) # fpbs.write(' -ksp_max_it %d ' % ksp_max_it) # fpbs.write(' -plot_geo %d ' % 0) # fpbs.write(' -rel_wsz %f ' % 0) # fpbs.write(' -rel_whz %f ' % 100) # fpbs.write(' -ffweight %f ' % 2) # fpbs.write(' -sm %s ' % sm) # fpbs.write(' -zoom_factor %f ' 
% 1) # fpbs.write(' -max_iter %d ' % max_iter) # fpbs.write(' -update_order %d ' % update_order) # fpbs.write(' -update_fun %s ' % update_fun) # fpbs.write(' -eval_dt %f ' % eval_dt) # fpbs.write(' -planeShearRatex %f ' % planeShearRatex) # fpbs.write(' -main_fun_noIter %d ' % main_fun_noIter) # fpbs.write(' > %s.txt \n\n' % job_name) # # write to .sh file # frun.write('cd $t_dir/%s\n' % job_name) # frun.write('qsub %s.pbs\n\n' % job_name) # n_pbs = n_pbs + 1 # frun.write('\n') # print('n_pbs = ', n_pbs) ###################################################################################################################################### # # planeShearRatex = 1 # # eval_dt = 0.3 # # n_tail_list = [2, 3, 4 ] # # max_iter = 1e5 # # update_order = 3 # # ksp_max_it = 300 # # update_fun = 'Adams_Moulton_Methods' # # ch = 3 # # nth = 20 # # rh1 = 0.1 # # rh2 = 0.03 # # job_dir = 'motion_helix_passive2b' # # main_fun_noIter = 1 # # importlib.reload(spf) # # write_pbs_head = spf.write_pbs_head # # sm = 'pf' # PWD = os.getcwd() # if not os.path.exists(job_dir): # os.makedirs(job_dir) # t_name = os.path.join(job_dir, 'run.sh') # n_pbs = 0 # with open(t_name, 'w') as frun: # # create .pbs file # frun.write('t_dir=$PWD \n') # for n_tail in n_tail_list: # job_name = 'hlx_ntail%d' % n_tail # print(job_name) # t_path = os.path.join(job_dir, job_name) # if not os.path.exists(t_path): # os.makedirs(t_path) # t_name = os.path.join(t_path, '%s.pbs' % job_name) # with open(t_name, 'w') as fpbs: # write_pbs_head(fpbs, job_name) # fpbs.write('mpirun -n 24 python ') # fpbs.write(' ../../motion_helix_passive.py ') # fpbs.write(' -f %s ' % job_name) # fpbs.write(' -pickProblem %d ' % 0) # fpbs.write(' -save_singleEcoli_vtk %d ' % 0) # fpbs.write(' -rh1 %f ' % rh1) # fpbs.write(' -rh2 %f ' % rh2) # fpbs.write(' -ch %f ' % ch) # fpbs.write(' -nth %d ' % nth) # fpbs.write(' -eh %f ' % -1) # fpbs.write(' -ph %f ' % (2/3)) # fpbs.write(' -hfct %f ' % 1) # fpbs.write(' -n_tail %d ' % n_tail) # 
fpbs.write(' -with_cover %d ' % 2) # fpbs.write(' -left_hand %d ' % 0) # fpbs.write(' -rs1 %f ' % 1.5) # fpbs.write(' -rs2 %f ' % 0.5) # fpbs.write(' -ds %f ' % 0.07) # fpbs.write(' -es %f ' % -1) # fpbs.write(' -with_T_geo %d ' % 0) # fpbs.write(' -dist_hs %f ' % 0.5) # fpbs.write(' -ksp_max_it %d ' % ksp_max_it) # fpbs.write(' -plot_geo %d ' % 0) # fpbs.write(' -rel_wsz %f ' % 0) # fpbs.write(' -rel_whz %f ' % 100) # fpbs.write(' -ffweight %f ' % 2) # fpbs.write(' -sm %s ' % sm) # fpbs.write(' -zoom_factor %f ' % 1) # fpbs.write(' -max_iter %d ' % max_iter) # fpbs.write(' -update_order %d ' % update_order) # fpbs.write(' -update_fun %s ' % update_fun) # fpbs.write(' -eval_dt %f ' % eval_dt) # fpbs.write(' -planeShearRatex %f ' % planeShearRatex) # fpbs.write(' -main_fun_noIter %d ' % main_fun_noIter) # fpbs.write(' > %s.txt \n\n' % job_name) # # write to .sh file # frun.write('cd $t_dir/%s\n' % job_name) # frun.write('qsub %s.pbs\n\n' % job_name) # n_pbs = n_pbs + 1 # frun.write('\n') # print('n_pbs = ', n_pbs) ###################################################################################################################################### planeShearRatex = 1 eval_dt = 0.5 n_tail_list = [1, ] max_iter = 1e3 update_order = 3 ksp_max_it = 300 update_fun = 'Adams_Moulton_Methods' ch = 3 nth = 20 rh1 = 0.1 rh2 = 0.03 job_dir = 'motion_helix_passive2c' main_fun_noIter = 1 importlib.reload(spf) write_pbs_head = spf.write_pbs_head_newturb sm = 'pf' ini_rot_theta_list = np.array([1/6, 1/6, 1/6, 2/6, 2/6, 2/6, 2/6]) * np.pi ini_rot_phi_list = np.array([1/6, 2/6, 3/6, 1/6, 2/6, 3/6, 4/6]) * np.pi PWD = os.getcwd() if not os.path.exists(job_dir): os.makedirs(job_dir) t_name = os.path.join(job_dir, 'run.sh') n_pbs = 0 with open(t_name, 'w') as frun: # create .pbs file frun.write('t_dir=$PWD \n') for n_tail in n_tail_list: for ini_rot_theta, ini_rot_phi in zip(ini_rot_theta_list, ini_rot_phi_list): job_name = 'hlx_ntail%d_theta%4.2f_phi%4.2f' % (n_tail, ini_rot_theta, 
ini_rot_phi) print(job_name) t_path = os.path.join(job_dir, job_name) if not os.path.exists(t_path): os.makedirs(t_path) t_name = os.path.join(t_path, '%s.pbs' % job_name) with open(t_name, 'w') as fpbs: write_pbs_head(fpbs, job_name) fpbs.write('mpirun -n 24 python ') fpbs.write(' ../../motion_helix_passive.py ') fpbs.write(' -f %s ' % job_name) fpbs.write(' -pickProblem %d ' % 0) fpbs.write(' -save_singleEcoli_vtk %d ' % 0) fpbs.write(' -rh1 %f ' % rh1) fpbs.write(' -rh2 %f ' % rh2) fpbs.write(' -ch %f ' % ch) fpbs.write(' -nth %d ' % nth) fpbs.write(' -eh %f ' % -1) fpbs.write(' -ph %f ' % (2/3)) fpbs.write(' -hfct %f ' % 1) fpbs.write(' -n_tail %d ' % n_tail) fpbs.write(' -with_cover %d ' % 2) fpbs.write(' -left_hand %d ' % 0) fpbs.write(' -rs1 %f ' % 1.5) fpbs.write(' -rs2 %f ' % 0.5) fpbs.write(' -ds %f ' % 0.07) fpbs.write(' -es %f ' % -1) fpbs.write(' -with_T_geo %d ' % 0) fpbs.write(' -dist_hs %f ' % 0.5) fpbs.write(' -ksp_max_it %d ' % ksp_max_it) fpbs.write(' -plot_geo %d ' % 0) fpbs.write(' -rel_wsz %f ' % 0) fpbs.write(' -rel_whz %f ' % 100) fpbs.write(' -ffweight %f ' % 2) fpbs.write(' -sm %s ' % sm) fpbs.write(' -zoom_factor %f ' % 1) fpbs.write(' -max_iter %d ' % max_iter) fpbs.write(' -update_order %d ' % update_order) fpbs.write(' -update_fun %s ' % update_fun) fpbs.write(' -eval_dt %f ' % eval_dt) fpbs.write(' -planeShearRatex %f ' % planeShearRatex) fpbs.write(' -main_fun_noIter %d ' % main_fun_noIter) fpbs.write(' -ini_rot_theta %f ' % ini_rot_theta) fpbs.write(' -ini_rot_phi %f ' % ini_rot_phi) fpbs.write(' > %s.txt \n\n' % job_name) # write to .sh file frun.write('cd $t_dir/%s\n' % job_name) frun.write('qsub %s.pbs\n\n' % job_name) n_pbs = n_pbs + 1 frun.write('\n') print('n_pbs = ', n_pbs) # + # case motion_helix_passive, each case store in a independent folder. 
# Shared defaults; ini_rot_theta / ini_rot_phi are overwritten by whichever
# configuration block below is active.
planeShearRatex = 1
ini_rot_theta, ini_rot_phi = 0, 0

# --- inactive configuration: motion_helix_passive2b ---
# eval_dt = 0.3
# n_tail_list = [2, 3, 4 ]
# max_iter = 1e5
# update_order = 3
# ksp_max_it = 300
# update_fun = 'Adams_Moulton_Methods'
# ch = 3
# nth = 20
# rh1 = 0.1
# rh2 = 0.03
# job_dir = 'motion_helix_passive2b'
# main_fun_noIter = 1
# importlib.reload(spf)
# write_pbs_head = spf.write_pbs_head
# sm = 'pf'

# --- active configuration: motion_helix_passive2d ---
ini_rot_theta, ini_rot_phi = np.pi * np.array((1 / 4, 1 / 4))
eval_dt = 0.3
n_tail_list = [2, 3, 4]
max_iter = 1e5
update_order = 3
ksp_max_it = 300
update_fun = 'Adams_Moulton_Methods'
ch = 3
nth = 20
rh1 = 0.1
rh2 = 0.03
job_dir = 'motion_helix_passive2d'
main_fun_noIter = 1
importlib.reload(spf)
write_pbs_head = spf.write_pbs_head
sm = 'pf'

# One sub-folder per job; run.sh cd's into each folder and qsub's its .pbs file.
PWD = os.getcwd()
if not os.path.exists(job_dir):
    os.makedirs(job_dir)
t_name = os.path.join(job_dir, 'run.sh')
n_pbs = 0
with open(t_name, 'w') as frun:
    # run.sh first remembers the submission directory.
    frun.write('t_dir=$PWD \n')
    for n_tail in n_tail_list:
        job_name = f'hlx_ntail{n_tail}'
        print(job_name)
        t_path = os.path.join(job_dir, job_name)
        if not os.path.exists(t_path):
            os.makedirs(t_path)
        t_name = os.path.join(t_path, f'{job_name}.pbs')
        with open(t_name, 'w') as fpbs:
            write_pbs_head(fpbs, job_name)
            fpbs.write('mpirun -n 24 python ')
            fpbs.write(' ../../motion_helix_passive.py ')
            # (flag, printf-style format, value) triples, emitted in order, so the
            # generated command line is byte-identical to writing each flag by hand.
            flag_specs = (
                ('f', '%s', job_name),
                ('pickProblem', '%d', 0),
                ('save_singleEcoli_vtk', '%d', 0),
                ('rh1', '%f', rh1),
                ('rh2', '%f', rh2),
                ('ch', '%f', ch),
                ('nth', '%d', nth),
                ('eh', '%f', -1),
                ('ph', '%f', 2 / 3),
                ('hfct', '%f', 1),
                ('n_tail', '%d', n_tail),
                ('with_cover', '%d', 2),
                ('left_hand', '%d', 0),
                ('rs1', '%f', 1.5),
                ('rs2', '%f', 0.5),
                ('ds', '%f', 0.07),
                ('es', '%f', -1),
                ('with_T_geo', '%d', 0),
                ('dist_hs', '%f', 0.5),
                ('ksp_max_it', '%d', ksp_max_it),
                ('plot_geo', '%d', 0),
                ('rel_wsz', '%f', 0),
                ('rel_whz', '%f', 100),
                ('ffweight', '%f', 2),
                ('sm', '%s', sm),
                ('zoom_factor', '%f', 1),
                ('max_iter', '%d', max_iter),
                ('update_order', '%d', update_order),
                ('update_fun', '%s', update_fun),
                ('eval_dt', '%f', eval_dt),
                ('planeShearRatex', '%f', planeShearRatex),
                ('main_fun_noIter', '%d', main_fun_noIter),
                ('ini_rot_theta', '%f', ini_rot_theta),
                ('ini_rot_phi', '%f', ini_rot_phi),
            )
            for flag, fmt, val in flag_specs:
                fpbs.write(' -%s %s ' % (flag, fmt % val))
            fpbs.write(' > %s.txt \n\n' % job_name)
        # append the submission commands for this job to run.sh
        frun.write(f'cd $t_dir/{job_name}\n')
        frun.write(f'qsub {job_name}.pbs\n\n')
        n_pbs += 1
    frun.write('\n')
print('n_pbs = ', n_pbs)
# -

# Notebook display cell: shows the theta list defined in an earlier cell.
ini_rot_theta_list
head_Force/generate_bash.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

import panel as pn
pn.extension()
from panel.widgets.input import PasswordInput

# The ``PasswordInput`` allows entering any string using an obfuscated text input box.
#
# For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Links.ipynb).
#
# #### Parameters:
#
# For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
#
# ##### Core
#
# * **``value``** (str): Any string
#
# ##### Display
#
# * **``disabled``** (boolean): Whether the widget is editable
# * **``name``** (str): The title of the widget
# * **``placeholder``** (str): A placeholder string displayed when no value is entered
#
# ___

password_input = pn.widgets.input.PasswordInput(name='Password Input', placeholder='Enter a string here...')

password_input

# ``PasswordInput.value`` returns a string type that can be read out and set like other widgets:

password_input.value
examples/reference/widgets/PasswordInput.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:flyvec] *
#     language: python
#     name: conda-env-flyvec-py
# ---

#default_exp utils

#hide
# %reload_ext autoreload
# %autoreload 2

# # Delegation

# Delegation can make a lot of python code that relies on `kwargs` easier. This function is taken from a
# [blog post](https://www.fast.ai/2019/08/06/delegation/) by <NAME> @fast.ai

# +
#export
import inspect

def delegates(to=None, keep=False):
    """Decorator: replace `**kwargs` in a signature with params from `to`.

    Parameters
    ----------
    to : callable, optional
        Callable whose default-valued parameters are spliced into the decorated
        object's signature in place of `**kwargs`.  When `None`, the decorated
        object is assumed to be a class and delegation goes from its base
        class's `__init__` into its own `__init__`.
    keep : bool
        When True, `**kwargs` is kept at the end of the rewritten signature.

    Returns
    -------
    callable
        The decorator; it mutates `__signature__` and returns its argument.
    """
    def _f(f):
        if to is None:
            # Class form: expose base-class __init__ params on f.__init__.
            to_f, from_f = f.__base__.__init__, f.__init__
        else:
            to_f, from_f = to, f
        sig = inspect.signature(from_f)
        sigd = dict(sig.parameters)
        # Raises KeyError if `from_f` has no **kwargs — intentional, since there
        # is nothing to delegate into.
        kwargs_param = sigd.pop('kwargs')
        # Copy over only the parameters of `to_f` that have defaults and are not
        # already present.  `inspect.Parameter.empty` is a sentinel object, so
        # compare by identity (`is not`), not equality.
        delegated = {name: p
                     for name, p in inspect.signature(to_f).parameters.items()
                     if p.default is not inspect.Parameter.empty and name not in sigd}
        sigd.update(delegated)
        if keep:
            sigd['kwargs'] = kwargs_param
        from_f.__signature__ = sig.replace(parameters=sigd.values())
        return f
    return _f
# -

# # Export

#hide
from nbdev.export import notebook2script
notebook2script()
nbs/04_utils.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # The data block API

# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.tabular import *
from fastai.text import *
from fastai.vision import *
np.random.seed(42)
# -

# The data block API lets you customize the creation of a [`DataBunch`](/basic_data.html#DataBunch) by isolating the underlying parts of that process in separate blocks, mainly:
#  1. Where are the inputs and how to create them?
#  1. How to split the data into training and validation sets?
#  1. How to label the inputs?
#  1. What transforms to apply?
#  1. How to add a test set?
#  1. How to wrap in dataloaders and create the [`DataBunch`](/basic_data.html#DataBunch)?
#
# Each of these may be addressed with a specific block designed for your unique setup. Your inputs might be in a folder, a csv file, or a dataframe. You may want to split them randomly, by certain indices or depending on the folder they are in. You can have your labels in your csv file or your dataframe, but they may also come from folders or from a specific function of the input. You may choose to add data augmentation or not. A test set is optional too. Finally you have to set the arguments to put the data together in a [`DataBunch`](/basic_data.html#DataBunch) (batch size, collate function...)
#
# The data block API is called as such because you can mix and match each one of those blocks with the others, allowing for total flexibility to create your customized [`DataBunch`](/basic_data.html#DataBunch) for training, validation and testing. The factory methods of the various [`DataBunch`](/basic_data.html#DataBunch) are great for beginners but you can't always make your data fit in the tracks they require.
# # <img src="imgs/mix_match.png" alt="Mix and match" width="200"> # # As usual, we'll begin with end-to-end examples, then switch to the details of each of those parts. # ## Examples of use # Let's begin with our traditional MNIST example. path = untar_data(URLs.MNIST_TINY) tfms = get_transforms(do_flip=False) path.ls() (path/'train').ls() # In [`vision.data`](/vision.data.html#vision.data), we create an easy [`DataBunch`](/basic_data.html#DataBunch) suitable for classification by simply typing: data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24) # This is aimed at data that is in folders following an ImageNet style, with the [`train`](/train.html#train) and `valid` directories, each containing one subdirectory per class, where all the pictures are. There is also a `test` directory containing unlabelled pictures. With the data block API, we can group everything together like this: data = (ImageItemList.from_folder(path) #Where to find the data? -> in path and its subfolders .split_by_folder() #How to split in train/valid? -> use the folders .label_from_folder() #How to label? -> depending on the folder of the filenames .add_test_folder() #Optionally add a test set (here default name is test) .transform(tfms, size=64) #Data augmentation? -> use tfms with a size of 64 .databunch()) #Finally? -> use the defaults for conversion to ImageDataBunch data.show_batch(3, figsize=(6,6), hide_axis=False) # Let's look at another example from [`vision.data`](/vision.data.html#vision.data) with the planet dataset. This time, it's a multiclassification problem with the labels in a csv file and no given split between valid and train data, so we use a random split. The factory method is: planet = untar_data(URLs.PLANET_TINY) planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.) 
data = ImageDataBunch.from_csv(planet, folder='train', size=128, suffix='.jpg', label_delim = ' ', ds_tfms=planet_tfms) # With the data block API we can rewrite this like that: data = (ImageItemList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg') #Where to find the data? -> in planet 'train' folder .random_split_by_pct() #How to split in train/valid? -> randomly with the default 20% in valid .label_from_df(label_delim=' ') #How to label? -> use the csv file .transform(planet_tfms, size=128) #Data augmentation? -> use tfms with a size of 128 .databunch()) #Finally -> use the defaults for conversion to databunch data.show_batch(rows=2, figsize=(9,7)) # The data block API also allows you to get your data together in problems for which there is no direct [`ImageDataBunch`](/vision.data.html#ImageDataBunch) factory method. For a segmentation task, for instance, we can use it to quickly get a [`DataBunch`](/basic_data.html#DataBunch). Let's take the example of the [camvid dataset](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/). The images are in an 'images' folder and their corresponding mask is in a 'labels' folder. camvid = untar_data(URLs.CAMVID_TINY) path_lbl = camvid/'labels' path_img = camvid/'images' # We have a file that gives us the names of the classes (what each code inside the masks corresponds to: a pedestrian, a tree, a road...) codes = np.loadtxt(camvid/'codes.txt', dtype=str); codes # And we define the following function that infers the mask filename from the image filename. get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}' # Then we can easily define a [`DataBunch`](/basic_data.html#DataBunch) using the data block API. Here we need to use `tfm_y=True` in the transform call because we need the same transforms to be applied to the target mask as were applied to the image. 
data = (SegmentationItemList.from_folder(path_img) .random_split_by_pct() .label_from_func(get_y_fn, classes=codes) .transform(get_transforms(), tfm_y=True, size=128) .databunch()) data.show_batch(rows=2, figsize=(7,5)) # Another example for object detection. We use our tiny sample of the [COCO dataset](http://cocodataset.org/#home) here. There is a helper function in the library that reads the annotation file and returns the list of images names with the list of labelled bboxes associated to it. We convert it to a dictionary that maps image names with their bboxes and then write the function that will give us the target for each image filename. coco = untar_data(URLs.COCO_TINY) images, lbl_bbox = get_annotations(coco/'train.json') img2bbox = dict(zip(images, lbl_bbox)) get_y_func = lambda o:img2bbox[o.name] # The following code is very similar to what we saw before. The only new addition is the use of a special function to collate the samples in batches. This comes from the fact that our images may have multiple bounding boxes, so we need to pad them to the largest number of bounding boxes. data = (ObjectItemList.from_folder(coco) #Where are the images? -> in coco .random_split_by_pct() #How to split in train/valid? -> randomly with the default 20% in valid .label_from_func(get_y_func) #How to find the labels? -> use get_y_func .transform(get_transforms(), tfm_y=True) #Data augmentation? -> Standard transforms with tfm_y=True .databunch(bs=16, collate_fn=bb_pad_collate)) #Finally we convert to a DataBunch and we use bb_pad_collate data.show_batch(rows=2, ds_type=DatasetType.Valid, figsize=(6,6)) # But vision isn't the only application where the data block API works. It can also be used for text and tabular data. With our sample of the IMDB dataset (labelled texts in a csv file), here is how to get the data together for a language model. imdb = untar_data(URLs.IMDB_SAMPLE) data_lm = (TextList.from_csv(imdb, 'texts.csv', cols='text') #Where are the inputs? 
Column 'text' of this csv .random_split_by_pct() #How to split it? Randomly with the default 20% .label_for_lm() #Label it for a language model .databunch()) data_lm.show_batch() # For a classification problem, we just have to change the way labelling is done. Here we use the csv column `label`. data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text') .split_from_df(col='is_valid') .label_from_df(cols='label') .databunch()) data_clas.show_batch() # Lastly, for tabular data, we just have to pass the name of our categorical and continuous variables as an extra argument. We also add some [`PreProcessor`](/data_block.html#PreProcessor)s that are going to be applied to our data once the splitting and labelling is done. adult = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(adult/'adult.csv') dep_var = 'salary' cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain'] procs = [FillMissing, Categorify, Normalize] data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs) .split_by_idx(valid_idx=range(800,1000)) .label_from_df(cols=dep_var) .databunch()) data.show_batch() # ## Step 1: Provide inputs # The basic class to get your inputs into is the following one. It's also the same class that will contain all of your labels (hence the name [`ItemList`](/data_block.html#ItemList)). # + hide_input=true show_doc(ItemList, title_level=3) # + [markdown] hide_input=true # This class regroups the inputs for our model in `items` and saves a `path` attribute which is where it will look for any files (image files, csv file with labels...). `create_func` is applied to `items` to get the final output. 
`label_cls` will be called to create the labels from the result of the label function, `xtra` contains additional information (usually an underlying dataframe) and `processor` is to be applied to the inputs after the splitting and labelling. # - # It has multiple subclasses depending on the type of data you're handling. Here is a quick list: # - [`CategoryList`](/data_block.html#CategoryList) for labels in classification # - [`MultiCategoryList`](/data_block.html#MultiCategoryList) for labels in a multi classification problem # - [`FloatList`](/data_block.html#FloatList) for float labels in a regression problem # - [`ImageItemList`](/vision.data.html#ImageItemList) for data that are images # - [`SegmentationItemList`](/vision.data.html#SegmentationItemList) like [`ImageItemList`](/vision.data.html#ImageItemList) but will default labels to [`SegmentationLabelList`](/vision.data.html#SegmentationLabelList) # - [`SegmentationLabelList`](/vision.data.html#SegmentationLabelList) for segmentation masks # - [`ObjectItemList`](/vision.data.html#ObjectItemList) like [`ImageItemList`](/vision.data.html#ImageItemList) but will default labels to `ObjectLabelList` # - `ObjectLabelList` for object detection # - [`PointsItemList`](/vision.data.html#PointsItemList) for points (of the type [`ImagePoints`](/vision.image.html#ImagePoints)) # - [`ImageImageList`](/vision.data.html#ImageImageList) for image to image tasks # - [`TextList`](/text.data.html#TextList) for text data # - [`TextFilesList`](/text.data.html#TextFilesList) for text data stored in files # - [`TabularList`](/tabular.data.html#TabularList) for tabular data # - [`CollabList`](/collab.html#CollabList) for collaborative filtering # Once you have selected the class that is suitable, you can instantiate it with one of the following factory methods # + hide_input=true show_doc(ItemList.from_folder) # + hide_input=true show_doc(ItemList.from_df) # + hide_input=true show_doc(ItemList.from_csv) # - # ### Optional step: 
# filter your data

# The factory method may have grabbed too many items. For instance, if you were searching sub folders with the `from_folder` method, you may have gotten files you don't want. To remove those, you can use one of the following methods.

# + hide_input=true
show_doc(ItemList.filter_by_func)

# + hide_input=true
show_doc(ItemList.filter_by_folder)

# + hide_input=true
show_doc(ItemList.filter_by_rand)

# + hide_input=true
show_doc(ItemList.to_text)

# + hide_input=true
show_doc(ItemList.use_partial_data)
# -

# ### Writing your own [`ItemList`](/data_block.html#ItemList)

# First check if you can't easily customize one of the existing subclasses by:
# - subclassing an existing one and replacing the `get` method (or the `open` method if you're dealing with images)
# - applying a custom `processor` (see step 4)
# - changing the default `label_cls` for the label creation
# - adding a default [`PreProcessor`](/data_block.html#PreProcessor) with the `_processor` class variable
#
# If this isn't the case and you really need to write your own class, there is a [full tutorial](/tutorial.itemlist) that explains how to proceed.

# + hide_input=true
show_doc(ItemList.analyze_pred)

# + hide_input=true
show_doc(ItemList.get)

# + hide_input=true
show_doc(ItemList.new)
# -

# You'll never need to subclass this normally, just don't forget to add to `self.copy_new` the names of the arguments that need to be copied each time `new` is called in `__init__`.

# + hide_input=true
show_doc(ItemList.reconstruct)
# -

# ## Step 2: Split the data between the training and the validation set

# This step is normally straightforward, you just have to pick one of the following functions depending on what you need.
# + hide_input=true show_doc(ItemList.no_split) # + hide_input=true show_doc(ItemList.random_split_by_pct) # + hide_input=true show_doc(ItemList.split_by_files) # + hide_input=true show_doc(ItemList.split_by_fname_file) # + hide_input=true show_doc(ItemList.split_by_folder) # + hide_input=true jekyll_note("This method looks at the folder immediately after `self.path` for `valid` and `train`.") # + hide_input=true show_doc(ItemList.split_by_idx) # + hide_input=true show_doc(ItemList.split_by_idxs) # + hide_input=true show_doc(ItemList.split_by_list) # + hide_input=true show_doc(ItemList.split_by_valid_func) # + hide_input=true show_doc(ItemList.split_from_df) # + hide_input=true jekyll_warn("This method assumes the data has been created from a csv file or a dataframe.") # - # ## Step 3: Label the inputs # To label your inputs, use one of the following functions. Note that even if it's not in the documented arguments, you can always pass a `label_cls` that will be used to create those labels (the default is the one from your input [`ItemList`](/data_block.html#ItemList), and if there is none, it will go to [`CategoryList`](/data_block.html#CategoryList), [`MultiCategoryList`](/data_block.html#MultiCategoryList) or [`FloatList`](/data_block.html#FloatList) depending on the type of the labels). 
This is implemented in the following function: # + hide_input=true show_doc(ItemList.get_label_cls) # - # The first example in these docs created labels as follows: path = untar_data(URLs.MNIST_TINY) ll = ImageItemList.from_folder(path).split_by_folder().label_from_folder().train # If you want to save the data necessary to recreate your [`LabelList`](/data_block.html#LabelList) (not including saving the actual image/text/etc files), you can use `to_df` or `to_csv`: # # ```python # ll.train.to_csv('tmp.csv') # ``` # # Or just grab a `pd.DataFrame` directly: ll.to_df().head() # + hide_input=true show_doc(ItemList.label_empty) # + hide_input=true show_doc(ItemList.label_from_list) # + hide_input=true show_doc(ItemList.label_from_df) # + hide_input=true jekyll_warn("This method only works with data objects created with either `from_csv` or `from_df` methods.") # + hide_input=true show_doc(ItemList.label_const) # + hide_input=true show_doc(ItemList.label_from_folder) # + hide_input=true jekyll_note("This method looks at the last subfolder in the path to determine the classes.") # + hide_input=true show_doc(ItemList.label_from_func) # + hide_input=true show_doc(ItemList.label_from_re) # + hide_input=true show_doc(CategoryList, title_level=3) # - # [`ItemList`](/data_block.html#ItemList) suitable for storing labels in `items` belonging to `classes`. If `None` are passed, `classes` will be determined by the unique different labels. `processor` will default to [`CategoryProcessor`](/data_block.html#CategoryProcessor). # + hide_input=true show_doc(MultiCategoryList, title_level=3) # - # It will store list of labels in `items` belonging to `classes`. If `None` are passed, `classes` will be determined by the unique different labels. `sep` is used to split the content of `items` in a list of tags. # # If `one_hot=True`, the items contain the labels one-hot encoded. In this case, it is mandatory to pass a list of `classes` (as we can't use the different labels). 
# + hide_input=true show_doc(FloatList, title_level=3) # + hide_input=true show_doc(EmptyLabelList, title_level=3) # - # ## Invisible step: preprocessing # This isn't seen here in the API, but if you passed a `processor` (or a list of them) in your initial [`ItemList`](/data_block.html#ItemList) during step 1, it will be applied here. If you didn't pass any processor, a list of them might still be created depending on what is in the `_processor` variable of your class of items (this can be a list of [`PreProcessor`](/data_block.html#PreProcessor) classes). # # A processor is a transformation that is applied to all the inputs once at initialization, with a state computed on the training set that is then applied without modification on the validation set (and maybe the test set). For instance, it can be processing texts to tokenize then numericalize them. In that case we want the validation set to be numericalized with exactly the same vocabulary as the training set. # # Another example is in tabular data, where we fill missing values with (for instance) the median computed on the training set. That statistic is stored in the inner state of the [`PreProcessor`](/data_block.html#PreProcessor) and applied on the validation set. # # This is the generic class for all processors. # + hide_input=true show_doc(PreProcessor, title_level=3) # + hide_input=true show_doc(PreProcessor.process_one) # - # Process one `item`. This method needs to be written in any subclass. # + hide_input=true show_doc(PreProcessor.process) # - # Process a dataset. This default to apply `process_one` on every `item` of `ds`. 
# + hide_input=true show_doc(CategoryProcessor, title_level=3) # + hide_input=true show_doc(CategoryProcessor.generate_classes) # + hide_input=true show_doc(MultiCategoryProcessor, title_level=3) # + hide_input=true show_doc(MultiCategoryProcessor.generate_classes) # - # ## Optional steps # ### Add transforms # Transforms differ from processors in the sense they are applied on the fly when we grab one item. They also may change each time we ask for the same item in the case of random transforms. # + hide_input=true show_doc(LabelLists.transform) # - # This is primary for the vision application. The `kwargs` are the one expected by the type of transforms you pass. `tfm_y` is among them and if set to `True`, the transforms will be applied to input and target. # ### Add a test set # To add a test set, you can use one of the two following methods. # + hide_input=true show_doc(LabelLists.add_test) # + hide_input=true jekyll_note("Here `items` can be an `ItemList` or a collection.") # + hide_input=true show_doc(LabelLists.add_test_folder) # + [markdown] hide_input=true # **Important**! No labels will be collected if available. Instead, either the passed `label` argument or a first label from `train_ds` will be used for all entries of this dataset. # # In the `fastai` framework `test` datasets have no labels - this is the unknown data to be predicted. # # If you want to use a `test` dataset with labels, you probably need to use it as a validation set, as in: # # ``` # data_test = (ImageItemList.from_folder(path) # .split_by_folder(train='train', valid='test') # .label_from_folder() # ...) 
# ``` # # Another approach, where you do use a normal validation set, and then when the training is over, you just want to validate the test set w/ labels as a validation set, you can do this: # # ``` # tfms = [] # path = Path('data').resolve() # data = (ImageItemList.from_folder(path) # .split_by_pct() # .label_from_folder() # .transform(tfms) # .databunch() # .normalize() ) # learn = create_cnn(data, models.resnet50, metrics=accuracy) # learn.fit_one_cycle(5,1e-2) # # # now replace the validation dataset entry with the test dataset as a new validation dataset: # # everything is exactly the same, except replacing `split_by_pct` w/ `split_by_folder` # # (or perhaps you were already using the latter, so simply switch to valid='test') # data_test = (ImageItemList.from_folder(path) # .split_by_folder(train='train', valid='test') # .label_from_folder() # .transform(tfms) # .databunch() # .normalize() # ) # learn.data = data_test # learn.validate() # ``` # Of course, your data block can be totally different, this is just an example. # - # ## Step 4: convert to a [`DataBunch`](/basic_data.html#DataBunch) # This last step is usually pretty straightforward. You just have to include all the arguments we pass to [`DataBunch.create`](/basic_data.html#DataBunch.create) (`bs`, `num_workers`, `collate_fn`). The class called to create a [`DataBunch`](/basic_data.html#DataBunch) is set in the `_bunch` attribute of the inputs of the training set if you need to modify it. Normally, the various subclasses we showed before handle that for you. # + hide_input=true show_doc(LabelLists.databunch) # - # ## Inner classes # + hide_input=true show_doc(LabelList, title_level=3) # - # Optionally apply `tfms` to `y` if `tfm_y` is `True`. 
# + hide_input=true show_doc(LabelList.export) # + hide_input=true show_doc(LabelList.transform_y) # + hide_input=true show_doc(LabelList.get_state) # + hide_input=true show_doc(LabelList.load_empty) # + hide_input=true show_doc(LabelList.load_state) # + hide_input=true show_doc(LabelList.process) # + hide_input=true show_doc(LabelList.set_item) # + hide_input=true show_doc(LabelList.to_df) # + hide_input=true show_doc(LabelList.to_csv) # + hide_input=true show_doc(LabelList.transform) # + hide_input=true show_doc(ItemLists, title_level=3) # + hide_input=true show_doc(ItemLists.label_from_lists) # + hide_input=true show_doc(ItemLists.transform) # + hide_input=true show_doc(ItemLists.transform_y) # + hide_input=true show_doc(LabelLists, title_level=3) # + hide_input=true show_doc(LabelLists.get_processors) # + hide_input=true show_doc(LabelLists.load_empty) # + hide_input=true show_doc(LabelLists.load_state) # + hide_input=true show_doc(LabelLists.process) # - # ## Helper functions # + hide_input=true show_doc(get_files) # - # ## Undocumented Methods - Methods moved below this line will intentionally be hidden show_doc(CategoryList.new) show_doc(LabelList.new) show_doc(CategoryList.get) show_doc(LabelList.predict) show_doc(ItemList.new) show_doc(ItemList.process_one) show_doc(ItemList.process) show_doc(MultiCategoryProcessor.process_one) show_doc(FloatList.get) show_doc(CategoryProcessor.process_one) show_doc(CategoryProcessor.create_classes) show_doc(CategoryProcessor.process) show_doc(MultiCategoryList.get) show_doc(FloatList.new) show_doc(FloatList.reconstruct) show_doc(MultiCategoryList.analyze_pred) show_doc(MultiCategoryList.reconstruct) show_doc(CategoryList.reconstruct) show_doc(CategoryList.analyze_pred) show_doc(EmptyLabelList.reconstruct) show_doc(EmptyLabelList.get) show_doc(LabelList.databunch) # ## New Methods - Please document or move to the undocumented section # + hide_input=true show_doc(ItemList.add) # - #
docs_src/data_block.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.4 64-bit (''python39-2vM0bQN-'': pipenv)'
#     name: python3
# ---

# + [markdown] tags=[]
# # Step 4: Upload a Dataset
# -

# ## Step 4a: Log into our Domain

import syft as sy

# Let's log into the domain using the credentials
ADMIN_EMAIL = "<EMAIL>"
ADMIN_PASSWORD = "<PASSWORD>"
DOMAIN1_PORT = 8081

# BUG FIX: the password keyword previously received a bare (unquoted)
# placeholder, which is a syntax error; pass the admin credential instead.
domain_client = sy.login(
    email=ADMIN_EMAIL, password=ADMIN_PASSWORD, port=DOMAIN1_PORT
)

# ## Step 4b: Creating a Dataset

# +
from utils import download_mednist_dataset

download_mednist_dataset()
# -

# file path where the MedNIST.pkl is downloaded
FILE_PATH = "./MedNIST.pkl"

# replace these with your own from the session details
MY_PARTICIPANT_NUMBER = 1
TOTAL_PARTICIPANTS = 10

# ### Load the Dataset

# Import helper methods
from syft.core.adp.data_subject_list import DataSubjectList
from utils import (
    get_data_description,
    get_label_mapping,
    split_into_train_test_val_sets,
    load_data_as_df,
)

# Let's load the dataset as a dataframe
dataset_df = load_data_as_df(MY_PARTICIPANT_NUMBER, TOTAL_PARTICIPANTS, FILE_PATH)

# Split the dataset into `train`, `validation` and `test` sets.
data_dict = split_into_train_test_val_sets(dataset_df)

dataset_description = get_data_description(dataset_df)
print(dataset_description)

# +
import numpy as np

assets = dict()

for name, data in data_dict.items():
    # Data Subjects are the individuals whose privacy we're trying to protect.
    data_subjects = DataSubjectList.from_series(data["patient_id"])

    # Convert images to numpy int64 array.
    # np.dstack stacks the per-row images depth-wise; the reshape flattens each
    # image and rollaxis moves the sample axis to the front.
    images = data["image"]
    images = np.dstack(images.values).astype(np.int64)  # type cast to int64
    dims = images.shape
    images = images.reshape(dims[0] * dims[1], dims[2])  # reshape to 2D array
    images = np.rollaxis(images, -1)

    # Convert labels to numpy int64 array
    labels = data["label"].to_numpy().astype("int64")

    # Next we make the data private with min/max bounds and the data subjects.
    # min_val/max_val bound the values in the data (pixel intensities 0-255,
    # label ids 0-5).
    image_data = sy.Tensor(images).private(
        min_val=0, max_val=255, data_subjects=data_subjects
    )
    label_data = sy.Tensor(labels).private(
        min_val=0, max_val=5, data_subjects=data_subjects
    )

    assets[f"{name}_images"] = image_data
    assets[f"{name}_labels"] = label_data
# -

# ## Step 4c: Upload the Dataset

# Name of the dataset
name = f"MedNIST Data {MY_PARTICIPANT_NUMBER}/{TOTAL_PARTICIPANTS}"

# upload the MedNIST data
domain_client.load_dataset(
    assets=assets, name=name, description=dataset_description
)

# Now let's check if the dataset was successfully uploaded
domain_client.datasets

domain_client.datasets[-1]

# ## Step 4d: Create a Data Scientist Account

data_scientist_details = {
    "name": "<NAME>",
    "email": "<EMAIL>",
    "password": "<PASSWORD>",
    "budget": 9999,
}

domain_client.users.create(**data_scientist_details)

print("Please give these details to the data scientist 👇🏽")

login_details = {}
# reuse the port constant so this stays in sync with the login cell above
login_details["url"] = DOMAIN1_PORT
login_details["name"] = data_scientist_details["name"]
login_details["email"] = data_scientist_details["email"]
# BUG FIX: this previously read from a bare (unquoted) placeholder, which is
# a syntax error; the password lives in data_scientist_details.
login_details["password"] = data_scientist_details["password"]
login_details["dataset_name"] = name

print()
print(login_details)
notebooks/Experimental/Rasswanth/MedNIST_demo/MN-data-owners-upload.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ga8ZwCa9x96W" # #Neural Network Implementation# # # by <NAME> and <NAME>, Santa Cruz Artificial Intelligence # derived from [this blog post](https://github.com/SkalskiP/ILearnDeepLearning.py/blob/master/01_mysteries_of_neural_networks/03_numpy_neural_net/Numpy%20deep%20neural%20network.ipynb) # # Today, we're going to be creating and training a deep neural network from the information we learned. We'll prepare a dataset, and once we are done making the neural net, we'll train it and make some predictons using the data we set up! # + [markdown] id="zLALhqXXjkJg" # First: let's review the steps needed to create a Neural Network: # # # # 1. Prepare Data # 2. Initialize Neural Network Layers # 3. Activation functions (used to normalize outputs of neurons for next layer) # 4. Forward Propagation (used for predictions) # 5. Loss function (ensures we are moving in the right direction) # 6. Backward Propagation (learning and training) # 7. Updating parameters (optimize gradients) # # # + [markdown] id="aZbamUKPkwL1" # # 1. Prepare Dataset: # + id="CkaiNN3bk1ft" import numpy as np import random from sklearn.model_selection import train_test_split from sklearn.datasets import load_iris iris_dataset = load_iris() # generate our training and testing labels and data! X_train, X_test, y_train, y_test = train_test_split(iris_dataset.data, iris_dataset.target, test_size=0.1, random_state=42) # + [markdown] id="l8sGK5awk5bb" # # 2. Initialize Neural Network Layers # + id="z8E-W4dlk9OW" # this creates the architecture for our neural network. notice how the # dimension of inputs for layer `n` is equal to the dimensions of the outputs of # layer `n-1`. 
# the final layer has only one output, which is our prediction
network_config = [
    # NOTE(review): the first layer must accept one input per feature. The
    # iris data loaded above has 4 features, so input_dim was corrected from
    # the original 2 (which made the very first matrix multiply fail).
    { 'input_dim': 4, 'output_dim': 25, 'activation': 'relu' },
    { 'input_dim': 25, 'output_dim': 50, 'activation': 'relu' },
    { 'input_dim': 50, 'output_dim': 50, 'activation': 'relu' },
    { 'input_dim': 50, 'output_dim': 25, 'activation': 'relu' },
    # NOTE(review): a single sigmoid output only supports binary (0/1)
    # labels, while iris has 3 classes -- confirm the intended targets.
    { 'input_dim': 25, 'output_dim': 1, 'activation': 'sigmoid' }
]

# + id="fHj7ZTabmvSA"
def init_layers(config, seed=69):
    """Randomly initialize the parameters of every layer described in `config`.

    Returns a dict with keys 'W1'/'b1', 'W2'/'b2', ...; weights have shape
    (output_dim, input_dim) and biases (output_dim, 1). Everything is scaled
    by 0.1 so the initial activations stay small.
    """
    np.random.seed(seed)  # fixed seed -> reproducible initialization
    network_params = {}

    # enumerate yields (index, layer) pairs; parameter keys are 1-based
    for idx, layer in enumerate(config):
        layer_id = idx + 1
        layer_in = layer['input_dim']
        layer_out = layer['output_dim']

        # np.random.randn samples from a standard normal distribution with
        # the requested shape
        network_params['W' + str(layer_id)] = np.random.randn(layer_out, layer_in) * 0.1
        network_params['b' + str(layer_id)] = np.random.randn(layer_out, 1) * 0.1

    return network_params

# + [markdown] id="vc2YUBcxmjMG"
# # 3.
# Activation Functions

# + id="Xhs8SdI1mpPd"
def sigmoid(Z):
    """Vectorized logistic sigmoid; works on scalars and numpy arrays."""
    return 1 / (1 + np.exp(-Z))


def relu(Z):
    """Vectorized ReLU: negative entries map to 0, everything else passes.

    BUG FIX: the original scalar `if Z <= 0` version raised "truth value of
    an array is ambiguous" as soon as it was fed a numpy matrix (and
    relu_derivative below already operates on arrays), so use np.maximum.
    """
    return np.maximum(0, Z)


# sigmoid derivative, which we will use during the backpropagation step
def sigmoid_derivative(dA, Z):
    # chain rule: dA * s'(Z), where s'(Z) = s(Z) * (1 - s(Z))
    return dA * sigmoid(Z) * (1 - sigmoid(Z))


# relu derivative, which we will use during the backpropagation step
def relu_derivative(dA, Z):
    # gradient passes through where Z > 0 and is cut to zero elsewhere
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    return dZ

# + [markdown] id="5Q_CGrVxueo9"
# # 4. Forward Propagation

# + id="eEYVqXyswD1D"
# execute the forward propagation step for one layer
# this is a helper function for the `full_forward_prop` function
def single_layer_forward_prop(A_prev, W, b, activation='relu'):
    """One layer forward: Z = W @ A_prev + b, A = g(Z); returns (A, Z)."""
    # matrix multiply the weights with the old activation signal, add bias
    Z = np.dot(W, A_prev) + b
    # pass the intermediate result through the layer's activation function
    A = relu(Z) if activation == 'relu' else sigmoid(Z)
    return A, Z


# complete forward propagation step, used to run inferences
def full_forward_prop(X, params, config):
    """Run the whole network on X (features x samples).

    Returns the final activation (the prediction) and a cache dict holding
    the intermediate A/Z matrices that backpropagation needs later.
    """
    temp = {}
    # the activation signal for the first layer is simply the input
    A = X

    for idx, layer in enumerate(config):
        layer_id = idx + 1
        # get the activation signal from the previous layer
        A_prev = A

        # look up this layer's activation function, weights, and bias
        activation = layer['activation']
        W = params['W' + str(layer_id)]
        b = params['b' + str(layer_id)]

        # carry out forward propagation for one layer
        A, Z = single_layer_forward_prop(A_prev, W, b, activation)

        # store the activation signal and intermediate matrix in the cache
        temp['A' + str(idx)] = A_prev
        temp['Z' + str(layer_id)] = Z

    return A, temp

# + [markdown] id="61akOhHP03Zz"
# # 5.
# Loss Function

# + id="PgN_ayF_0_2p"
# python version of the Binary Cross Entropy loss.
# NOTE(review): BCE assumes binary (0/1) labels, but the iris labels loaded
# earlier take values 0-2 -- confirm the intended target encoding before
# trusting the reported loss/accuracy.
def cost_fn(y_hat, y):
    """Binary cross-entropy averaged over the m samples (columns of y_hat)."""
    m = y_hat.shape[1]
    # cost = -1/m * sum( y*log(y_hat) + (1-y)*log(1-y_hat) )
    # (was left as a TODO, which made this function raise NameError)
    cost = -1 / m * (np.dot(y, np.log(y_hat).T)
                     + np.dot(1 - y, np.log(1 - y_hat).T))

    # np.squeeze gets rid of extra singleton dimensions, e.g. (1, 1) -> scalar
    return np.squeeze(cost)


# a utility function which takes probabilities and changes them into classes
def prob_to_class(p):
    p_ = np.copy(p)
    p_[p_ > 0.5] = 1
    p_[p_ <= 0.5] = 0
    return p_


# calculate the accuracy
def accuracy_fn(y_hat, y):
    y_hat_ = prob_to_class(y_hat)
    # (y_hat_ == y) compares entrywise; .all(axis=0) requires every row of a
    # column to match, and .mean() turns the booleans into an accuracy
    return (y_hat_ == y).all(axis = 0).mean()

# + [markdown] id="JDv59c6S7-EL"
# # 6.
# Backpropagation

# + id="nMMfl-X48AJJ"
# backpropagation step for a single layer
def single_layer_back_prop(dA, W, b, Z, A_prev, activation="relu"):
    """Given dL/dA for this layer, return (dW, db, dL/dA_prev)."""
    # number of data points (columns of the activation matrix)
    m = A_prev.shape[1]

    # undo the activation, then distribute the gradient to W, b and A_prev
    dZ = relu_derivative(dA, Z) if activation == 'relu' else sigmoid_derivative(dA, Z)
    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    return dW, db, dA_prev


def full_back_prop(y_hat, y, temp, params, config):
    """Walk the layers in reverse and collect the dW/db gradients for each."""
    gradients = {}

    # ensure prediction and truth value vectors have same shape
    y = y.reshape(y_hat.shape)

    # derivative of binary cross-entropy loss with respect to the prediction
    # (this was the missing TODO -- without it dA_prev was undefined)
    dA_prev = - (np.divide(y, y_hat) - np.divide(1 - y, 1 - y_hat))

    # work backward through the layers, reusing the cached forward-pass values
    for idx, layer in reversed(list(enumerate(config))):
        layer_id = idx + 1
        activation = layer['activation']

        dA = dA_prev

        # recover the cached activations and this layer's parameters
        # (this was the missing TODO -- A_prev/Z/W/b were undefined)
        A_prev = temp['A' + str(idx)]
        Z = temp['Z' + str(layer_id)]
        W = params['W' + str(layer_id)]
        b = params['b' + str(layer_id)]

        # carry out a single step of backpropagation on this layer
        dW, db, dA_prev = single_layer_back_prop(dA, W, b, Z, A_prev, activation)

        # add the gradients we have calculated to be used later
        gradients["dW" + str(layer_id)] = dW
        gradients["db" + str(layer_id)] = db

    return gradients

# + [markdown] id="XAT5dAgvCjas"
# # 7.
# Update Parameters

# + id="l9iW2mzsCmRN"
# the actual learning part: using the gradients we have calculated to update
# the weights and biases
def update_params(params, config, gradients, learning_rate):
    """One gradient-descent step: shift every W/b against its gradient.

    The learning rate is a hyperparameter scaling how far each step moves:
    too low and the network may never converge, too high and it can diverge.
    Mutates and returns `params`.
    """
    # start the enumeration at 1 because parameter keys are 1-based (W1, b1, ...)
    for layer_id, layer in enumerate(config, 1):
        # (this was the missing TODO -- the parameters were never updated,
        # so the network could not learn at all)
        params["W" + str(layer_id)] -= learning_rate * gradients["dW" + str(layer_id)]
        params["b" + str(layer_id)] -= learning_rate * gradients["db" + str(layer_id)]

    return params

# + [markdown] id="AARF97JtDMUM"
# # Finally... the training step!

# + id="wmFvkXvrDOfj"
def train(X, y, config, epochs, learning_rate):
    """Train the network; returns (params, cost_history, accuracy_history)."""
    # create the network
    params = init_layers(config)

    # collect the cost and accuracy throughout training so we can plot them
    cost_history = []
    accuracy_history = []

    # an epoch is one complete pass over the training dataset
    for i in range(epochs):
        y_hat, mem = full_forward_prop(X, params, config)

        # store accuracy and cost for later
        cost_history.append(cost_fn(y_hat, y))
        accuracy_history.append(accuracy_fn(y_hat, y))

        # backpropagate to get the gradients, then take a descent step
        # (this was the missing TODO)
        gradients = full_back_prop(y_hat, y, mem, params, config)
        params = update_params(params, config, gradients, learning_rate)

    # return the newly trained weights and biases for each layer; this set of
    # optimized parameters is used to run inference on new data below
    return params, cost_history, accuracy_history

# + [markdown] id="QI6j7y4QFG-Q"
# # Testing

# + id="1SYyGcRoJchv"
# TODO: train this bad boy!
# note: you will have to transpose the X and Y vectors
# to get them in the right shape. You can do this with `np.transpose`
# (this call was left as a TODO, so the plotting and inference cells below
# crashed with NameError on `params` / `accuracy_history`)
params, cost_history, accuracy_history = train(
    np.transpose(X_train),
    np.transpose(y_train.reshape((y_train.shape[0], 1))),
    network_config,
    10000,   # epochs
    0.01,    # learning rate
)

# + id="uWpqT0Cl7mmV"
# %matplotlib inline
import matplotlib.pyplot as plt

# quickly just plot accuracy over training
plt.plot(accuracy_history)
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.title('accuracy over training period')
plt.show()

# + id="PrAWJSKTTKhV"
# run inferences on test data!
# remember notation: y_hat is the predicted values, and we predict these values
# by sending our test data through the network via the FORWARD PROPAGATION step.
y_test_hat, _ = full_forward_prop(np.transpose(X_test), params, network_config)

# calculate and print final test accuracy
test_accuracy = accuracy_fn(y_test_hat, np.transpose(y_test.reshape((y_test.shape[0], 1))))
print("Test set accuracy: {:.2f}".format(test_accuracy))
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Ôn tập lại Python 1 tí cho vui - Phần 2: Functional programming trong Python # # # https://stackabuse.com/functional-programming-in-python # https://www.reddit.com/r/Python/comments/a8swkw/understanding_python_lambda_functions_and_why/ # https://www.reddit.com/r/learnpython/comments/3lyv56/when_do_you_use_a_lambda_function/ # https://www.reddit.com/r/learnpython/comments/5wjavx/what_is_the_point_of_lambda_in_python/ # https://realpython.com/python-lambda/ # https://en.wikipedia.org/wiki/Anonymous_function
python_review_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + # !pip install monai tqdm # !python -c "import monai" || pip install -q "monai-weekly[nibabel, tqdm]" # !python -c "import matplotlib" || pip install -q matplotlib # !pip install self-attention-cv==1.2.3 # %matplotlib inline import os import shutil import tempfile import matplotlib.pyplot as plt import numpy as np from monai.apps import DecathlonDataset from monai.config import print_config from monai.data import DataLoader from monai.losses import DiceLoss, DiceCELoss from monai.metrics import DiceMetric from monai.networks.nets import UNet from monai.transforms import ( Activations, AsChannelFirstd, AsDiscrete, CenterSpatialCropd, Compose, LoadImaged, MapTransform, NormalizeIntensityd, Orientationd, RandFlipd, RandScaleIntensityd, 
RandShiftIntensityd, RandSpatialCropd, Spacingd, ToTensord, ) from monai.utils import set_determinism import torch print_config() # + root_dir = './' print(root_dir) set_determinism(seed=0) class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform): """ Convert labels to multi channels based on brats classes: label 1 is the peritumoral edema label 2 is the GD-enhancing tumor label 3 is the necrotic and non-enhancing tumor core The possible classes are TC (Tumor core), WT (Whole tumor) and ET (Enhancing tumor). """ def __call__(self, data): d = dict(data) for key in self.keys: result = [] # merge label 2 and label 3 to construct TC result.append(np.logical_or(d[key] == 2, d[key] == 3)) # merge labels 1, 2 and 3 to construct WT result.append( np.logical_or( np.logical_or(d[key] == 2, d[key] == 3), d[key] == 1 ) ) # label 2 is ET result.append(d[key] == 2) d[key] = np.stack(result, axis=0).astype(np.float32) return d # + roi_size=[128, 128, 64] pixdim=(1.5, 1.5, 2.0) train_transform = Compose( [ # load 4 Nifti images and stack them together LoadImaged(keys=["image", "label"]), AsChannelFirstd(keys="image"), ConvertToMultiChannelBasedOnBratsClassesd(keys="label"), Spacingd( keys=["image", "label"], pixdim=pixdim, mode=("bilinear", "nearest"), ), Orientationd(keys=["image", "label"], axcodes="RAS"), RandSpatialCropd( keys=["image", "label"], roi_size=roi_size, random_size=False), RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0), NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True), RandScaleIntensityd(keys="image", factors=0.1, prob=0.5), RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5), ToTensord(keys=["image", "label"]), ] ) val_transform = Compose( [ LoadImaged(keys=["image", "label"]), AsChannelFirstd(keys="image"), ConvertToMultiChannelBasedOnBratsClassesd(keys="label"), Spacingd( keys=["image", "label"], pixdim=pixdim, mode=("bilinear", "nearest"), ), Orientationd(keys=["image", "label"], axcodes="RAS"), 
CenterSpatialCropd(keys=["image", "label"], roi_size=roi_size), NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True), ToTensord(keys=["image", "label"]), ] ) # + # !pip install -q "monai-weekly[nibabel, tqdm]" cache_num = 8 train_ds = DecathlonDataset( root_dir=root_dir, task="Task01_BrainTumour", transform=train_transform, section="training", download=True, num_workers=4, cache_num=cache_num, # it was 100 but we use larger volumes ) train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=2) val_ds = DecathlonDataset( root_dir=root_dir, task="Task01_BrainTumour", transform=val_transform, section="validation", download=False, num_workers=4, cache_num=cache_num, ) val_loader = DataLoader(val_ds, batch_size=2, shuffle=False, num_workers=2) # + slice_id = 32 # pick one image from DecathlonDataset to visualize and check the 4 channels print(f"image shape: {val_ds[2]['image'].shape}") plt.figure("image", (24, 6)) for i in range(4): plt.subplot(1, 4, i + 1) plt.title(f"image channel {i}") plt.imshow(val_ds[2]["image"][i, :, :, slice_id].detach().cpu(), cmap="gray") # plt.show() # also visualize the 3 channels label corresponding to this image print(f"label shape: {val_ds[2]['label'].shape}") plt.figure("label", (24, 6)) for i in range(3): plt.subplot(1, 3, i + 1) plt.title(f"label channel {i}") plt.imshow(val_ds[6]["label"][i, :, :, slice_id].detach().cpu()) plt.show() train_size = tuple(val_ds[6]['image'].shape[1:]) print(train_size) # + #from monai.networks.nets import UNETR as UNETR_monai from self_attention_cv import UNETR device = torch.device("cuda:0") num_heads = 10 # 12 normally embed_dim= 512 # 768 normally # model = UNETR(img_shape=tuple(roi_size), input_dim=4, output_dim=3, # embed_dim=embed_dim, patch_size=16, num_heads=num_heads, # ext_layers=[3, 6, 9, 12], norm='instance', # base_filters=16, # dim_linear_block=2048).to(device) model = UNet( dimensions=3, in_channels=4, out_channels=3, channels=(16, 32, 64, 128, 256), 
strides=(2, 2, 2, 2), num_res_units=2, ).to(device) # model = UNETR_monai( # in_channels=4, # out_channels=3, # img_size=tuple(roi_size), # feature_size=16, # hidden_size=embed_dim, # mlp_dim=3072, # num_heads=12, # pos_embed="perceptron", # norm_name="instance", # res_block=True, # dropout_rate=0.0, # ).to(device) pytorch_total_params = sum(p.numel() for p in model.parameters())/1000000 print('Parameters in millions:',pytorch_total_params) # + import torch.nn as nn from monai.losses import DiceLoss, DiceCELoss loss_function = DiceCELoss(to_onehot_y=False, sigmoid=True) optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-5) max_epochs = 180 val_interval = 5 best_metric = -1 best_metric_epoch = -1 epoch_loss_values = [] metric_values = [] metric_values_tc = [] metric_values_wt = [] metric_values_et = [] torch.cuda.empty_cache() for epoch in range(max_epochs): print("-" * 10) print(f"epoch {epoch + 1}/{max_epochs}") model.train() epoch_loss = 0 step = 0 for batch_data in train_loader: step += 1 inputs, labels = ( batch_data["image"].to(device), batch_data["label"].to(device), ) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_loss /= step epoch_loss_values.append(epoch_loss) print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}") if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): dice_metric = DiceMetric(include_background=True, reduction="mean") post_trans = Compose( [Activations(sigmoid=True), AsDiscrete(threshold_values=True)] ) metric_sum = metric_sum_tc = metric_sum_wt = metric_sum_et = 0.0 metric_count = ( metric_count_tc ) = metric_count_wt = metric_count_et = 0 for val_data in val_loader: val_inputs, val_labels = ( val_data["image"].to(device), val_data["label"].to(device), ) val_outputs = model(val_inputs) val_outputs = post_trans(val_outputs) # compute overall mean dice value, not_nans = 
dice_metric(y_pred=val_outputs, y=val_labels) not_nans = not_nans.mean().item() metric_count += not_nans metric_sum += value.mean().item() * not_nans # compute mean dice for TC value_tc, not_nans = dice_metric( y_pred=val_outputs[:, 0:1], y=val_labels[:, 0:1] ) not_nans = not_nans.item() metric_count_tc += not_nans metric_sum_tc += value_tc.item() * not_nans # compute mean dice for WT value_wt, not_nans = dice_metric( y_pred=val_outputs[:, 1:2], y=val_labels[:, 1:2] ) not_nans = not_nans.item() metric_count_wt += not_nans metric_sum_wt += value_wt.item() * not_nans # compute mean dice for ET value_et, not_nans = dice_metric( y_pred=val_outputs[:, 2:3], y=val_labels[:, 2:3] ) not_nans = not_nans.item() metric_count_et += not_nans metric_sum_et += value_et.item() * not_nans metric = metric_sum / metric_count metric_values.append(metric) metric_tc = metric_sum_tc / metric_count_tc metric_values_tc.append(metric_tc) metric_wt = metric_sum_wt / metric_count_wt metric_values_wt.append(metric_wt) metric_et = metric_sum_et / metric_count_et metric_values_et.append(metric_et) if metric > best_metric: best_metric = metric best_metric_epoch = epoch + 1 torch.save( model.state_dict(), os.path.join(root_dir, "best_metric_model.pth"), ) print("saved new best metric model") print( f"current epoch: {epoch + 1} current mean dice: {metric:.4f}" f" tc: {metric_tc:.4f} wt: {metric_wt:.4f} et: {metric_et:.4f}" f"\nbest mean dice: {best_metric:.4f}" f" at epoch: {best_metric_epoch}" ) save_name = "./last.pth" torch.save(model.state_dict(),save_name) from google.colab import files files.download(save_name) print( f"train completed, best_metric: {best_metric:.4f}" f" at epoch: {best_metric_epoch}" ) # + # unet tutorial result: train completed, best_metric: 0.7537 at epoch: 160 # official tutorial # train completed, best_metric: 0.7660 at epoch: 170 # my run # self-attention-cv implementation of unetr # current epoch: 180 current mean dice: 0.7686 tc: 0.8116 wt: 0.8935 et: 0.6065 # best 
mean dice: 0.7686 at epoch: 180 # reduced version 50m params current epoch: 180 current mean dice: 0.7693 tc: 0.8161 wt: 0.8922 et: 0.6057 best mean dice: 0.7693 at epoch: 180 # unetr monai implementation # current epoch: 175 current mean dice: 0.7612 tc: 0.8122 wt: 0.8790 et: 0.5982 # + plt.figure("train", (12, 6)) plt.subplot(1, 2, 1) plt.title("Epoch Average Loss") x = [i + 1 for i in range(len(epoch_loss_values))] y = epoch_loss_values plt.xlabel("epoch") plt.plot(x, y, color="red") plt.subplot(1, 2, 2) plt.title("Val Mean Dice") x = [val_interval * (i + 1) for i in range(len(metric_values))] y = metric_values plt.xlabel("epoch") plt.plot(x, y, color="green") plt.show() plt.figure("train", (18, 6)) plt.subplot(1, 3, 1) plt.title("Val Mean Dice TC") x = [val_interval * (i + 1) for i in range(len(metric_values_tc))] y = metric_values_tc plt.xlabel("epoch") plt.plot(x, y, color="blue") plt.subplot(1, 3, 2) plt.title("Val Mean Dice WT") x = [val_interval * (i + 1) for i in range(len(metric_values_wt))] y = metric_values_wt plt.xlabel("epoch") plt.plot(x, y, color="brown") plt.subplot(1, 3, 3) plt.title("Val Mean Dice ET") x = [val_interval * (i + 1) for i in range(len(metric_values_et))] y = metric_values_et plt.xlabel("epoch") plt.plot(x, y, color="purple") plt.show() # - model.load_state_dict( torch.load(os.path.join(root_dir, "best_metric_model.pth")) ) model.eval() with torch.no_grad(): # select one image to evaluate and visualize the model output val_input = val_ds[6]["image"].unsqueeze(0).to(device) val_output = model(val_input) plt.figure("image", (24, 6)) for i in range(4): plt.subplot(1, 4, i + 1) plt.title(f"image channel {i}") plt.imshow(val_ds[6]["image"][i, :, :, 20].detach().cpu(), cmap="gray") plt.show() # visualize the 3 channels label corresponding to this image plt.figure("label", (18, 6)) for i in range(3): plt.subplot(1, 3, i + 1) plt.title(f"label channel {i}") plt.imshow(val_ds[6]["label"][i, :, :, 20].detach().cpu()) plt.show() # visualize 
the 3 channels model output corresponding to this image plt.figure("output", (18, 6)) for i in range(3): plt.subplot(1, 3, i + 1) plt.title(f"output channel {i}") out_tensor = torch.sigmoid(val_output[0, i, :, :, 20]).detach().cpu() plt.imshow(out_tensor) plt.show()
icml-unetr-2022.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: plots # language: python # name: plots # --- # ## About # This page showcases the work of [Tuo Wang](https://tuowang.rbind.io/) that introduces packages to make [`ggplot2`](https://ggplot2.tidyverse.org/) plots more beautiful. You can find the original [R](https://www.r-graph-gallery.com/) code on Tuo's blog [here](https://tuowang.rbind.io/post/2021-03-25-ggplot2-extensions/). # # Thanks to him for accepting sharing his work here! Thanks also to [<NAME>](https://tcapretto.netlify.app/) who translated this work from R to Python! 🙏🙏 # ## Load libraries # # Today's visualization is made with matplotlib and the library [`adjustText`](https://github.com/Phlya/adjustText) to automatically adjust the position of labels in the plot. In addition, `load_penguins()` is imported from the palmerpenguins library to have access to the popular palmerpenguins dataset, and the `Faker` class is imported from the [`faker`](https://faker.readthedocs.io/) library to generate random names. # >Note: the `adjustText` library can be installed with `pip install adjustText` # + import matplotlib.pyplot as plt import numpy as np from adjustText import adjust_text from faker import Faker from palmerpenguins import load_penguins # - # ## Load and prepare data # # The dataset used today was collected and made available by [Dr. <NAME>](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a member of the [Long Term Ecological Research Network](https://lternet.edu/). This dataset was popularized by [<NAME>](https://www.allisonhorst.com/) in her R package [`palmerpenguins`](https://allisonhorst.github.io/palmerpenguins/) with the goal to offer an alternative to the iris dataset for data exploration and visualization. 
penguins = load_penguins() penguins.head() # Today's vizualisation does not require much data preparation. The only preparation step is to drop observations with missing values. penguins = penguins.dropna() # ## Basic Scatterplot # # Let's start by defining some important values that are used throughout the plot. # + # Colors BG_WHITE = "#fbf9f4" GREY_LIGHT = "#b4aea9" GREY50 = "#7F7F7F" GREY30 = "#4d4d4d" BLUE_DARK = "#1B2838" BLUE = "#2a475e" COLORS = ["#386cb0", "#fdb462", "#7fc97f" ] # A color for each species # A list with the species names SPECIES = sorted(penguins["species"].unique()) # Horizontal lines HLINES = [40, 50, 60] # Vertical and horizontal tick marks YTICKS = [40, 50, 60] XTICKS = [160, 170, 180, 190, 200, 210, 220, 230, 240] # List of random names # Generate Faker instance and set seed or reproducibility faker = Faker() # Set seed for reproducibility of the random names faker.seed_instance(11) NAMES = [faker.first_name() for i in range(len(penguins))] # The markers we use in the scatterplot MARKERS = ["o", "^", "s"] # circle, triangle, square # - # Then, let's initialize the chart layout, add a background color and the basic scatterplot: # + # Initialize layout ---------------------------------------------- fig, ax = plt.subplots(figsize= (14, 10)) # Background color fig.patch.set_facecolor(BG_WHITE) ax.set_facecolor(BG_WHITE) # Horizontal lines in the background for h in HLINES: ax.axhline(h, color=GREY50, ls=(0, (5, 5)), alpha=0.8, zorder=0) # Add scatterplot ----------------------------------------------- # Loop through species, colors, and markers. # Each species receives a different combination of color and marker. for species, color, marker in zip(SPECIES, COLORS, MARKERS): data = penguins[penguins["species"] == species] ax.scatter( "flipper_length_mm", "bill_length_mm", s=50, color=color, marker=marker, alpha=0.8, data=data ) # - # That's a pretty good start, but let's make it better! 
# ## Add labels with no overlap # # What's truely missing here are __labels__. It's very frustrating not knowing which item is hidden under a data point, isn't it!? # # It is pretty challenging to add many labels on a plot since labels tend to overlap each other, making the figure unreadable. Fortunately, the `adjustText` package is here to help us. It provides an algorithm that will automatically place the labels for us. Let's do it! # + # Add name labels ------------------------------------------------ # Only names that start with the letter "C" are added. # `ax.text()` outputs are appended to the `TEXTS` list. # This list is passed to `adjust_text()` to repel the labels and add arrows. TEXTS = [] for i in range(len(penguins)): if NAMES[i].startswith("C"): x = penguins["flipper_length_mm"].iloc[i] y = penguins["bill_length_mm"].iloc[i] text = NAMES[i] TEXTS.append(ax.text(x, y, text, color=GREY30, fontsize=14, fontname="Poppins")) # Adjust text position and add arrows ---------------------------- # 'expand_points' is a tuple with two multipliers by which to expand # the bounding box of texts when repelling them from points # 'arrowprops' receives a dictionary with all the properties we want # for the arrows adjust_text( TEXTS, expand_points=(2, 2), arrowprops=dict( arrowstyle="->", color=GREY50, lw=2 ), ax=fig.axes[0] ) fig # - # Note the `ax=fig.axes[0]` in the `adjust_text()` function call. That is not always necessary. In this case, the plot is built in several steps for demonstration purposes and so the axis object must be passed explicitly. When `ax` isn't explicitly passed, `adjust_text()` will use `plt.gca()` to obtain the axis. If all the code used to generate your plot goes in the same chunk of code than `adjust_text)`, then there's no need to pass any axis. # ## Final chart # The chart above is pretty close from being publication ready. 
What's needed now is a good title, a legend to make color and shapes more insightful, and some axis customization: # + # Add title and subtitle ----------------------------------------- # Title fig.suptitle( "Palmer Penguins Data Visualization", x = 0.122, y = 0.975, ha="left", fontsize=32, fontname="Lobster Two", color=BLUE, weight="bold", ) # Subtitle ax.set_title( "Scatter plot of flipper length vs bill length", loc="left", ha="left", fontsize=24, fontname="Lobster Two", color=BLUE_DARK, weight="bold", pad=10 ) # Add legend ----------------------------------------------------- # We store the result of `ax.legend()` for further customizations legend = ax.legend( loc=(0.85, 0.025), # bottom-right labelspacing=1.5, # add space between labels markerscale=1.5, # increase marker size frameon=False # don't put a frame ) # Iterate through text elements and species names for text, species in zip(legend.get_texts(), SPECIES): text.set_text(species) # Set species name text.set_fontname("Roboto") # Change default font text.set_fontsize(16) # Change default font size # Grab title and customize its appearence. 
legend.set_title("Species") legend_title = legend.get_title() legend_title.set_fontname("Roboto") legend_title.set_fontweight("bold") legend_title.set_fontsize(18) legend_title.set_ha("left") # Customize layout ----------------------------------------------- # Hide spines ax.spines["right"].set_color("none") ax.spines["top"].set_color("none") # Customize spines color ax.spines["left"].set_color(GREY_LIGHT) ax.spines["left"].set_linewidth(2) ax.spines["bottom"].set_color(GREY_LIGHT) ax.spines["bottom"].set_linewidth(2) # Customize labels and ticks ax.tick_params(length=0) ax.set_yticks(YTICKS) ax.set_yticklabels(YTICKS, fontname="Lobster Two", size=15) ax.set_ylabel("bill Length (mm)", fontname="Lobster Two", size=18, weight="bold") ax.set_xticks(XTICKS) ax.set_xticklabels(XTICKS, fontname="Lobster Two", size=15) ax.set_xlabel("flip length (mm)", fontname="Lobster Two", size=18, weight="bold") fig
src/notebooks/web-text-repel-with-matplotlib.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.0 # language: julia # name: julia-1.4 # --- # # JuliaでBit全探索を書く時にはdigitsを使うと便利。 # > Julia言語でBit全探索を実装します。 # # - toc: true # - badges: true # - comments: true # - categories: [AtCoder] # - image: images/chart-preview.png # # # digitsについて # digits(a, base=b, pad=c)は、10進数の整数aをc桁のb進数に変換した配列を返します。 # # # 例えば, n = 22 println(digits(n, base=2, pad=6)) # $22 = 10110_2$ なので、正しいですね! 桁数が足りない場合は0で埋められます。 # Juliaでは、これを利用して、Bit全探索を用意に実装できます。 # # (Bit全探索については、https://algo-logic.info/rec-bit-search/ がわかりやすいのでおすすめです) # # 具体的には以下のようなコードです。 N = 4 for i in 0:2^N - 1 pettern = digits(i, base=2, pad=N) println(pettern) end # あとはこの各パターンについて # # `1 -> True`, # `0 -> False` # # と見做して処理を行えば良いです。具体的に問題を解いてみます。 # #  部分和問題 # 部分和問題とは、 # n 個の整数 $a_1,...,a_n$ からなる配列Aと, # # 整数 S が与えられた時、適切な部分集合を選んで、総和をSとすることができるかを判定する問題です。 # # # 例えば, # # ```julia # A = [1, 2, 4, 5] # S = 8 # ``` # # の時, # # # $A_1 + A_2 + A_3 = 8$ なので、適切な部分集合を選んで総和をSとすることができました。 # # nが小さい時は、bit全探索を用いることで実用的な速度で解くことができます。 # この記事で最初に解説したように, digitsを用いて全てのパターンを列挙します。 N = 4 for i in 0:2^N - 1 pettern = digits(i, base=2, pad=N) println(pettern) end # ここで0を `false` (つまりそれを選択しない), # # 1を`true`(それを選択する)とみなした時、 # # そのパターンを表す配列を $P$ とすると、選択した要素の和は, # # # $$ # dot(A, P) = (A_1 * P_1 + A_2 * P_2 + ... + A_N * P_N) # $$ # # なので、 # # # $dot(A, P)$ と表すことができます。 # したがって、部分和問題は次のようなコードで解けます。 # # とてもシンプルですね! 
# +
using LinearAlgebra

"""
    solve(N, A, S)

Brute-force subset sum: enumerate every 0/1 selection pattern of length `N`
and return `"OK, P = ..."` for the first subset of `A` whose sum equals `S`,
or `"NO"` if no subset works.
"""
function solve(N, A, S)
    for i in 0:2^N - 1
        # i-th selection pattern: a 0/1 vector of length N
        P = digits(i, base=2, pad=N)
        # dot(A, P) is the sum of the selected elements
        if dot(A, P) == S
            return "OK, P = $P"
        end
    end
    return "NO"
end
# +
N = 4
A = rand(1:10, N)
S = rand(1:40)
@show N
@show A
@show S
solve(N, A, S)
# -

# The complexity is O(N * 2^N): the dot product costs O(N) and there are 2^N
# patterns to enumerate, so the running time explodes as N grows.
# Let's measure it.

using Plots; plotly()
using BenchmarkTools

# +
# We want the worst case, so we benchmark inputs that can never succeed:
# A is all zeros and the target sum is 1.
function benchmark(N)
    times = zeros(N)
    S = 1
    for i in 1:N
        A = zeros(Int, i)
        # BUGFIX: the original bound the @benchmark result to a local named
        # `benchmark`, shadowing this function inside its own body; use a
        # distinct name for the trial result.
        trial = @benchmark solve($i, $A, $S)
        times[i] = mean(trial.times)
    end
    return times
end
# -

result = benchmark(16)
p = plot(result, yaxis=:log)
xlabel!("N")
ylabel!("Time [ns]")

# A textbook exponential curve: watch out as N grows, the cost blows up fast.
clipboard(string(sprint(show, "text/html", p)))
_notebooks/2020-12-20-digits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spacy and SVM

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import numpy as np
import pandas as pd
import spacy
from spacy.matcher import Matcher
from spacy.tokens import Span
from spacy import displacy
# -

# Load the small English pipeline and the competition data.
nlp = spacy.load("en_core_web_sm")
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train.head()

# +
from spacy.lang.en.stop_words import STOP_WORDS
stopwords = list(STOP_WORDS)

import string
punct = string.punctuation


def text_data_cleaning(sentence):
    """Tokenise and lemmatise a sentence, dropping stop words and punctuation."""
    doc = nlp(sentence)
    # Pronouns keep their lowercase surface form; every other token is
    # lemmatised, lowercased and stripped.
    lemmas = [
        tok.lower_ if tok.lemma_ == "-PRON-" else tok.lemma_.lower().strip()
        for tok in doc
    ]
    return [t for t in lemmas if t not in stopwords and t not in punct]
# -

from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# TF-IDF features over the custom tokenizer, classified with a linear SVM.
tfidf = TfidfVectorizer(tokenizer=text_data_cleaning)
classifier = LinearSVC()

x = train['text']
y = train['target']
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

clf = Pipeline([('tfidf', tfidf), ('clf', classifier)])
clf.fit(X_train, y_train)

y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))

y_pred = clf.predict(test['text'])

# # BERT

# !wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py

# +
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub
import tokenization
# -


def bert_encode(texts, tokenizer, max_len=512):
    """Turn raw strings into (token ids, attention masks, segment ids) arrays."""
    all_tokens = []
    all_masks = []
    all_segments = []

    for raw_text in texts:
        # Truncate so that [CLS] ... [SEP] still fits within max_len.
        pieces = tokenizer.tokenize(raw_text)[:max_len - 2]
        sequence = ["[CLS]"] + pieces + ["[SEP]"]
        pad_len = max_len - len(sequence)

        ids = tokenizer.convert_tokens_to_ids(sequence) + [0] * pad_len
        mask = [1] * len(sequence) + [0] * pad_len

        all_tokens.append(ids)
        all_masks.append(mask)
        # Single-sentence inputs: every position belongs to segment 0.
        all_segments.append([0] * max_len)

    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)


def build_model(bert_layer, max_len=512):
    """Build a binary classifier on top of the [CLS] output of a BERT layer."""
    input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")

    _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    cls_token = sequence_output[:, 0, :]
    out = Dense(1, activation='sigmoid')(cls_token)

    model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    model.compile(Adam(lr=2e-6), loss='binary_crossentropy', metrics=['accuracy'])
    return model


# %%time
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True)

vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)

train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values

model = build_model(bert_layer, max_len=160)
model.summary()

train_history = model.fit(
    train_input, train_labels,
    validation_split=0.2,
    epochs=10,
    batch_size=16
)
Fake-Disaster-Tweet-Detection-Spacy-Bert-SVM/bert-spacy-svm.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:light
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from typing import List

# Type alias: a vector is represented as a plain list of floats.
Vector = List[float]


def add(v: Vector, w: Vector) -> Vector:
    """Adds corresponding elements."""
    assert len(v) == len(w), "vectors must be the same length"
    return [v_i + w_i for v_i, w_i in zip(v, w)]


assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]


def subtract(v: Vector, w: Vector) -> Vector:
    """Subtracts corresponding elements."""
    assert len(v) == len(w), "Vectors must be the same length"
    return [v_i - w_i for v_i, w_i in zip(v, w)]


assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]


def vector_sum(vectors: List[Vector]) -> Vector:
    """Sums all corresponding elements."""
    # check that vectors is not empty
    assert vectors, "no vectors provided!"
    # check that the vectors are all the same size
    num_elements = len(vectors[0])
    assert all(len(v) == num_elements for v in vectors), "different sizes!"
    # the i-th element of the result is the sum of every vector[i]
    return [sum(vector[i] for vector in vectors) for i in range(num_elements)]


assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]


def scalar_multiply(c: float, v: Vector) -> Vector:
    """Multiplies every element by c."""
    return [c * v_i for v_i in v]


assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]


def vector_mean(vectors: List[Vector]) -> Vector:
    """Computes the element-wise average."""
    n = len(vectors)
    return scalar_multiply(1 / n, vector_sum(vectors))


assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]

# A less obvious tool is the dot product. The dot product of two vectors is
# the sum of their componentwise products:


def dot(v: Vector, w: Vector) -> float:
    """Computes v_1 * w_1 + ... + v_n * w_n."""
    assert len(v) == len(w), "vectors must be same length"
    return sum(v_i * w_i for v_i, w_i in zip(v, w))


assert dot([1, 2, 3], [4, 5, 6]) == 32


def sum_of_squares(v: Vector) -> float:
    """Returns v_1 * v_1 + ... + v_n * v_n."""
    return dot(v, v)


assert sum_of_squares([1, 2, 3]) == 14

# +
import math


# Which we can use to compute its magnitude (or length):
def magnitude(v: Vector) -> float:
    """Returns the magnitude (or length) of v."""
    return math.sqrt(sum_of_squares(v))


# -

assert magnitude([3, 4]) == 5


# +
# We now have all the pieces we need to compute the distance between two vectors
def squared_distance(v: Vector, w: Vector) -> float:
    """Computes (v_1 - w_1)**2 + ... + (v_n - w_n)**2."""
    return sum_of_squares(subtract(v, w))


# -


def distance(v: Vector, w: Vector) -> float:
    """Computes the (Euclidean) distance between v and w."""
    return math.sqrt(squared_distance(v, w))


# A matrix is a list of rows.  Annotate the element type: the original bare
# `List[List]` alias said nothing about what the inner lists contain.
Matrix = List[List[float]]

from typing import Tuple

A = [[1, 2, 3], [4, 5, 6]]


def shape(A: Matrix) -> Tuple[int, int]:
    """Returns (# of rows of A, # of columns of A)."""
    num_rows = len(A)
    # number of elements in the first row; 0 if A is an empty list of lists
    num_cols = len(A[0]) if A else 0
    return num_rows, num_cols


assert shape(A) == (2, 3)


# +
def get_row(A: Matrix, i: int) -> Vector:
    """Returns the i-th row of A (as a Vector)."""
    return A[i]


def get_column(A: Matrix, j: int) -> Vector:
    """Returns the j-th column of A (as a Vector)."""
    return [A_i[j]        # j-th element of row vector A_i
            for A_i in A]  # for each row vector


# -

assert get_row(A, 1) == [4, 5, 6]
assert get_column(A, 1) == [2, 5]

from typing import Callable


def make_matrix(num_rows: int,
                num_cols: int,
                entry_fn: Callable[[int, int], float]) -> Matrix:
    """Returns a num_rows x num_cols matrix whose (i, j)-th entry is entry_fn(i, j)."""
    return [[entry_fn(i, j)             # entry_fn on each (i, j) pair
             for j in range(num_cols)]  # entry_fn(i, 0), ..., entry_fn(i, num_cols - 1)
            for i in range(num_rows)]   # create one list for each i


def identity_matrix(n: int) -> Matrix:
    """Returns the n x n identity matrix."""
    return make_matrix(n, n, lambda i, j: 1 if i == j else 0)


assert identity_matrix(5) == [[1, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0],
                              [0, 0, 1, 0, 0],
                              [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1]]
.ipynb_checkpoints/1_linear_algebra-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.lib.deepreload import reload # %load_ext autoreload # %autoreload 2 # + import re import operator from pathlib import Path import numpy as np import matplotlib.pyplot as plt import pandas as pd import pydicom from pymedphys_analysis.tpscompare import absolute_scans_from_mephysto from pymedphys_dicom.dicom.dose import dicom_dose_interpolate, xyz_axes_from_dataset # + ROOT_DIR = Path(r"S:\Physics\Monaco\Model vs Measurement Comparisons") DICOM_DIR = ROOT_DIR.joinpath(r"Beam Models\Photon Monte Carlo with Flattening Filter") MEASUREMENTS_DIR = ROOT_DIR.joinpath(r"Measurements\RCCC\Photons\With Flattening Filter") RESULTS = ROOT_DIR.joinpath(r"Results\RCCC\internal") # + mephysto_files = list(MEASUREMENTS_DIR.glob('*.mcc')) matches = [ re.match('06MV (\d\dx\d\d) Open.mcc', filepath.name) for filepath in mephysto_files ] keys = [ match.group(1) for match in matches if match ] mephysto_file_map = { key: filepath for key, filepath in zip(keys, mephysto_files) } mephysto_file_map # + dicom_file_map = { key: DICOM_DIR.joinpath(f'06MV_{key}.dcm') for key in keys } dicom_file_map # - dicom_dataset_map = { key: pydicom.read_file(str(dicom_file_map[key]), force=True) for key in keys } absolute_dose_table = pd.read_csv(MEASUREMENTS_DIR.joinpath('AbsoluteDose.csv'), index_col=0) absolute_dose = absolute_dose_table['d10 @ 90 SSD']['6 MV'] absolute_dose output_factors = pd.read_csv(MEASUREMENTS_DIR.joinpath('OutputFactors.csv'), index_col=0) output_factors # + absolute_dose_per_field = { key: output_factors[key]['6 MV'] * absolute_dose for key in keys } absolute_dose_per_field # + absolute_scans_per_field = { key: absolute_scans_from_mephysto( mephysto_file_map[key], absolute_dose_per_field[key], 100) for key in keys } # absolute_scans_per_field # - 
getter = operator.itemgetter('displacement', 'dose')

# Quick overlay of every measured depth-dose curve.
for key in keys:
    plt.plot(*getter(absolute_scans_per_field[key]['depth_dose']))


# +
def plot_tps_meas_diff(displacement, meas_dose, tps_dose):
    """Plot measured vs TPS dose with the residuals (TPS - Meas) on a twin axis."""
    diff = tps_dose - meas_dose
    diff_range = np.max(np.abs(diff))
    lines = []
    fig, ax1 = plt.subplots(figsize=(12, 6))
    lines += ax1.plot(displacement, meas_dose, label='Measured Dose')
    lines += ax1.plot(displacement, tps_dose, label='TPS Dose')
    ax1.set_ylabel('Dose (Gy / 100 MU)')
    x_bounds = [np.min(displacement), np.max(displacement)]
    ax1.set_xlim(x_bounds)
    # Residuals live on a secondary y-axis, symmetric about zero.
    ax2 = ax1.twinx()
    lines += ax2.plot(displacement, diff, color='C3', alpha=0.5,
                      label='Residuals [TPS - Meas]')
    ax2.plot(x_bounds, [0, 0], '--', color='C3', lw=0.5)
    ax2.set_ylim([-diff_range, diff_range])
    ax2.set_ylabel('Dose difference [TPS - Meas] (Gy / 100 MU)')
    # Combine the line handles from both axes into a single legend.
    labels = [l.get_label() for l in lines]
    ax1.legend(lines, labels)
    # fig.tight_layout()


# +
def plot_pdd_diff(key):
    """Compare the measured depth dose for `key` against the interpolated TPS dose."""
    depth, meas_dose = getter(absolute_scans_per_field[key]['depth_dose'])
    # shift into the DICOM coordinate frame (300 mm offset) — presumably the
    # phantom surface position; verify against the measurement setup
    y = depth - 300
    tps_dose = np.squeeze(
        dicom_dose_interpolate(dicom_dataset_map[key], ([0], y, [0]))) / 10
    plot_tps_meas_diff(depth, meas_dose, tps_dose)
    plt.title(f'Depth Dose Comparisons | {key} field')


for key in keys:
    plot_pdd_diff(key)
    filename = RESULTS.joinpath(f'{key}_pdd.png')
    plt.savefig(filename)
    plt.show()


# +
def plot_profile_diff(key, depth, direction):
    """Compare a measured profile at `depth` against the interpolated TPS dose.

    `direction` must be 'inplane' or 'crossplane'.
    """
    displacement, meas_dose = getter(
        absolute_scans_per_field[key]['profiles'][depth][direction])
    y = [depth - 300]
    # BUGFIX: the original compared strings with `is`, which tests object
    # identity and only worked via CPython string interning; use `==`.
    if direction == 'inplane':
        grid = (displacement, y, [0])
    elif direction == 'crossplane':
        grid = ([0], y, displacement)
    else:
        raise ValueError("Expected direction to be equal to 'inplane' or 'crossplane'")
    tps_dose = np.squeeze(dicom_dose_interpolate(dicom_dataset_map[key], grid)) / 10
    plot_tps_meas_diff(displacement, meas_dose, tps_dose)
    plt.title(f'{direction.capitalize()} Profile Comparisons | {key} field | Depth: {depth} mm')


for key in keys:
    depths = absolute_scans_per_field[key]['profiles'].keys()
    for depth in depths:
        for direction in ['inplane', 'crossplane']:
            plot_profile_diff(key, depth, direction)
            filename = RESULTS.joinpath(f'{key}_profile_{depth}mm_{direction}.png')
            plt.savefig(filename)
            plt.show()
# -
examples/archive/tpscompare/02-tpscompare.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os

from mplsoccer.pitch import Pitch
from mplsoccer.statsbomb import read_event, EVENT_SLUG

# Load the events of the first match in which Messi played as a false 9,
# skipping the auxiliary dataframes this example does not need.
events = read_event(
    os.path.join(EVENT_SLUG, '69249.json'),
    related_event_df=False,
    shot_freeze_frame_df=False,
    tactics_lineup_df=False,
)['event']

# Keep only Messi's actions (player_id 5503): the starting x/y positions.
df_false9 = events.loc[events.player_id == 5503, ['x', 'y']]

# Draw a half-pitch KDE joint plot of where those actions began.
pitch = Pitch(pitch_type='statsbomb', pitch_color='grass', stripe=True,
              view='half', pad_left=20)
g = pitch.jointplot(df_false9.x, df_false9.y, height=9, kind='kde',
                    shade=False, color='green', cmap="plasma", linewidths=3)
g.fig.suptitle("Messi's first game as a false 9",
               x=0.5, y=1.03, fontsize=25, ha='center', va='center')
g.savefig(os.path.join('figures', 'README_jointplot_example.png'),
          bbox_inches='tight')
docs/06-Plotting-jointplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- BRANCH="main" # + """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] # - # # Introduction # # In this notebook we demonstrate how to use p-tuning and prompt tuning within NeMo-Megatron. Both methods are parameter efficient alternatives to fine-tuning pretrained language models. Our NeMo implementation makes it possible to use one pretrained GPT model on many downstream tasks without needing to tune the model’s full set of parameters. It also allows for adding new tasks to your model without overwriting or disrupting previous tasks for which the model has already been p-tuned/prompt-tuned. Because the original model parameters are frozen and never altered by either method, p-tuning/prompt-tuning also avoid catastrophic forgetting issues often encountered when fine-tuning models. # # - Our prompt tuning implementation is based off Lester et. al’s EMNLP 2021 paper [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) # # - Our p-tuning implementation is based off Liu et al's paper [GPT Understands, Too](https://arxiv.org/abs/2103.10385). 
# # - Usage examples and API documentation can be found in [our user docs](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/prompt_learning.html). # # <img src="images/prompt_learning_forward_pass.png" alt="Prompt Learning Forward Pass"/> # # Our continuous learning capability for combined p-tuning and prompt tuning with GPT style models is a NeMo specific extension of the author’s original work. # # # The Plan # # We are going to show you how to: # # 1. P-Tune/Prompt Tune a model on multiple tasks at the same time # 2. Add a new task to a model that has already been P-Tuned/Prompt Tuned previously # # We will first p-tune a GPT model on sentiment analysis, and intent and slot classification tasks. Then we will show how to add the squad question answering task to the same model we already p-tuned once. # # # # Technical Overview # Instead of selecting discrete text prompts in a manual or automated fashion, prompt tuning and p-tuning utilize virtual prompt embeddings that can be optimized via gradient decent. The only difference between prompt tuning and p-tuning within NeMo-Megatron is the architecture used to tune the soft prompt tokens during training. # # ### Terminology # We will be using the terms `continuous`, `soft`, and `virtual` token interchangeably to refer to embeddings inserted into the model prompt that have no concrete mapping to strings or characters within the model’s vocabulary. These virtual token embeddings exist in contrast to the `discrete`, `hard`, or `real` tokens that do make up the model’s vocabulary. Virtual tokens are purely 1D vectors with dimensionality equal to that of each real token embedding, matching the `hidden_size` hyperparameter. In training and inference, continuous token embeddings are inserted among discrete token embeddings according to a template you provide in the model’s config. We will demonstrate how to do this below. 
# # When referring to p-tuning and prompt tuning together, we will be using the phrase prompt learning for simplicity. # # ### Prompt-Tuning # In prompt-tuning a pretrained GPT model, soft prompt embeddings are initialized as a 2D matrix of size `total_virtual_tokens X hidden_size`. Each task the model is prompt-tuned to perform has its own 2D embedding matrix associated with it. Tasks do not share any parameters during training or inference. All GPT model parameters are frozen and only the embedding parameters for each task are updated during training. # # In prompt tuning you can specify how the embeddings are initialized for each task. You can either # # 1. Initialize embedding parameters according to some random distribution # 2. Initialize embedding parameters from existing vocabulary embeddings (recommended) # # If you choose to initialize virtual token embeddings from existing embedding weights, you can provide the string of words you want to use for initialization in the model’s config. This string will be tokenized and tiled or truncated to match the specified number of virtual tokens you would like to use (`total_virtual_tokens`). Vocab embeddings are copied and used to initialize the soft prompt embedding matrix for each task. The vocab embeddings themselves are not updated or changed during prompt tuning. # # # ### P-Tuning # In p-tuning, an LSTM model is used to predict virtual token embeddings. We refer to this LSTM model as our `prompt_encoder`. LSTM parameters are randomly initialized at the start of p-tuning. All GPT model parameters are frozen, and only the LSTM weights are updated at each training step. LSTM parameters are shared between all tasks that are p-tuned at the same time, but the LSTM model outputs unique virtual token embeddings for each task. The virtual tokens predicted by the LSTM are inserted among the discrete token input in the exact same manner as with prompt-tuning. 
You still specify the number of virtual tokens you want to use by setting `total_virtual_tokens` and each virtual token embedding is still a 1D vector of size `hidden_size`. # # # # # The Best of Both # A single pretrained GPT model can use both p-tuning and prompt-tuning. While you must decide to use either p-tuning or prompt-tuning for each task you want your model to perform, you can p-tune your model on a set of tasks A, then prompt tune your same model on a different set of tasks B, then finally run inference on tasks from both A and B at the same time. During prompt-tuning or p-tuning, tasks tuned at the same time must use the same number of virtual tokens. During inference, tasks using differing amounts of virtual tokens can be run at the same time. # # Please see our [docs for more comparisons between prompt and p-tuning](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/prompt_learning.html). # # With all that covered, let's get started! # import os import wget import pathlib # # Tasks and Datasets # We will be using p-tuning to teach our GPT model to do 3 tasks: **Sentiment Analysis**, **Question Answering** and **Intent and Slot Classification**. # # We will use [Financial PhraseBank dataset](https://huggingface.co/datasets/financial_phrasebank) for our sentiment analysis task, [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) for question answering task, and the [Assistant Benchmarking Dataset](https://github.com/xliuhw/NLU-Evaluation-Data) for intent and slot classification. # # - The [Financial PhraseBank dataset](https://huggingface.co/datasets/financial_phrasebank) contains the sentiments for financial news headlines from the perspective of a retail investor. Further details about the dataset can be found in Malo et. 
al's "[Good Debt or Bad Debt: Detecting Semantic Orientations in Economic Texts](https://arxiv.org/abs/1307.5336)" # # # - [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) is a reading comprehension dataset, consisting of questions posed by crowd workers on a set of Wikipedia articles, where the answer to every question is a segment of text. More information on [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) can be found on their website or in their paper by Rajpurkar et. al "[Know What You Don’t Know: Unanswerable Questions for SQuAD](https://arxiv.org/pdf/1806.03822.pdf)". # # # - The [Assistant Benchmarking Dataset](https://github.com/xliuhw/NLU-Evaluation-Data) is a natural language dataset for in home human-robot interaction. Details on the dataset can be found in Liu et. al's "[Benchmarking Natural Language Understanding Services for building Conversational Agents](https://arxiv.org/abs/1903.05566)" # # Each of these tasks require different types of natural language understanding and lie in different domains. We will demonstrate how to use one model to perform all of them. # # Data Preparation # # The prompt learning dataset loader accepts a list of json/dictionary objects or a list of json file names where each json file contains a collection of json objects. Each json object must include the field `taskname` which is a string identifier for the task the data example corresponds to. They should also include one or more fields corresponding to different sections of the discrete text prompt. 
The input data might look like: # # ``` # [ # {"taskname": "squad", "context": [CONTEXT_PARAGRAPH_TEXT1], "question": [QUESTION_TEXT1], "answer": [ANSWER_TEXT1]}, # {"taskname": "squad", "context": [CONTEXT_PARAGRAPH_TEXT2], "question": [QUESTION_TEXT2], "answer": [ANSWER_TEXT2]}, # {"taskname": "intent_and_slot", "utterance": [UTTERANCE_TEXT1], "label": [INTENT_TEXT1][SLOT_TEXT1]}, # {"taskname": "intent_and_slot", "utterance": [UTTERANCE_TEXT2], "label": [INTENT_TEXT2][SLOT_TEXT2]}, # {"taskname": "sentiment", "sentence": [SENTENCE_TEXT1], "label": [SENTIMENT_LABEL1]}, # {"taskname": "sentiment", "sentence": [SENTENCE_TEXT2], "label": [SENTIMENT_LABEL2]}, # ] # ``` # # These additional fields can be unlimited in number and will be used to help map different parts of the discrete text input to a prompt template that you define. We will show how this mapping works and how to construct your prompt template in the `Prompt Formatting` section. # + # You can replace DATA_DIR and NEMO_DIR with your own locations DATA_DIR = "data" NEMO_DIR = '.' os.makedirs(DATA_DIR, exist_ok=True) # - # # For each dataset we have preprocessing scripts pre-written in NeMo's example directory located in `examples/nlp`. Let's download those now. 
# download the preprocessing scripts from github for the purpose of this tutorial wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/dataset_processing/nlp/financial_phrase_bank/prompt_learning_financial_phrase_bank_preprocessing.py', NEMO_DIR) wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py', NEMO_DIR) wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/dataset_processing/nlp/intent_and_slot/prompt_learning_assistant_preprocessing.py', NEMO_DIR) wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/dataset_processing/nlp/intent_and_slot/assistant_utils.py', NEMO_DIR) # Now let's down load and process each dataset. # ### Financial PhraseBank Dataset # + # Download the financial phrase bank dataset # !wget https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v1.0/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v1.0.zip # If you are having issues with the research gate link above, copy and paste it in your browser # and the file should download automatically. Then place it in the same directory in which # you are running this notebook. # - # If you are having issues with the research gate link above, copy and paste it in your browser and the file should download automatically. Then place it in the same directory in which you are running this notebook. 
# !unzip FinancialPhraseBank-v1.0.zip -d {DATA_DIR} # What the financial phrase bank dataset looks like before processing SENTIMENT_DIR = os.path.join(DATA_DIR, "FinancialPhraseBank-v1.0") # !head -4 $SENTIMENT_DIR/Sentences_AllAgree.txt # Preprocess financial phrase bank dataset # !python $NEMO_DIR/prompt_learning_financial_phrase_bank_preprocessing.py # What the financial phrase bank dataset looks like after processing # !head -4 $SENTIMENT_DIR/financial_phrase_bank_train.jsonl # Our financial phrase bank preprocessing script converted the raw text file of sentences and labels into three `.jsonl` files for training, validation, and testing. Each line in the files contains a json object with the fields `taskname`, `sentiment`, `sentence`, and `label`. You can inspect the preprocessing script and play with different arguments for the script by looking at and running `prompt_learning_financial_phrase_bank_preprocessing.py` which should currently be downloaded in `NEMO_DIR`. It is also located at `scripts/dataset_processing/nlp/financial_phrase_bank/prompt_learning_financial_phrase_bank_preprocessing.py` in the NeMo repo. # # By default 80% of the data was randomly selected for the training set, 10% for the validation set, and 10% for the test set. We only used training examples with 100% agreement from labelers on the correct sentiment label. This data is from `Sentences_AllAgree.txt`. This should result in `1811` training examples, `226` validation examples, and `227` examples for testing. The `label` field was removed from test examples. 
# # If you want to try using more data, you can combine the `Sentences_AllAgree.txt` with any of the `Sentences_75Agree.txt`, `Sentences_66Agree.txt` and/or `Sentences_50Agree.txt` by creating a new cell below and running: # # ``` # # !cat {SENTIMENT_DIR}/Sentences_AllAgree.txt {SENTIMENT_DIR}/Sentences_75Agree.txt >> {SENTIMENT_DIR}/combined_data.txt # # !python $NEMO_DIR/prompt_learning_financial_phrase_bank_preprocessing.py --file-name combined_data.txt # ``` # # ### SQuAD Dataset # + SQUAD_DIR = os.path.join(DATA_DIR, "SQuAD") os.makedirs(SQUAD_DIR, exist_ok=True) # Download the SQuAD dataset # !wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json # !mv train-v2.0.json {SQUAD_DIR} # - # Preprocess squad data # !python $NEMO_DIR/prompt_learning_squad_preprocessing.py --data-dir {SQUAD_DIR} # What the squad dataset looks like after processing # !head -4 $SQUAD_DIR/squad_train.jsonl # We made a `.jsonl` file for each of the train, validation, and testing splits of the squad data. Every `.jsonl` file contains json objects with the fields `taskname`, `context`, `question`, and `answer`. The preprocessing script is called `prompt_learning_squad_preprocessing.py`. It should be in your `NEMO_DIR` and at `scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py` in the NeMo repo. # # The SQuAD dataset consists of various topics like `Beyoncé`, `IPod`, and `Symbiosis`. Each topic has several paragraphs associated with it, and each paragraph has several questions and answers related to it. When we separated the train/validation/test splits, we separated them on the topic level. For example, if the training set contains paragraphs and questions about the topic `Beyoncé`, neither the validation nor test sets will contain any questions on this topic. All questions about a certain topic are isolated to one split of the data. 
# # Like the Financial PhraseBank Dataset, we randomly selected 80% of the questions for training, 10% for validation, and 10% for test. This resulted in `69125` training examples, `8952` validation examples, and `8744` testing examples. The `answer` field was removed from test examples. # # Training on the full train split could take a lot of time, so we are going to clip the train split to 20k examples for the sake of this tutorial. # ! head -20000 $SQUAD_DIR/squad_train.jsonl > $SQUAD_DIR/squad_short_train.jsonl # ### Assistant Dataset # + ASSISTANT_DIR = os.path.join(DATA_DIR, "assistant") os.makedirs(ASSISTANT_DIR, exist_ok=True) # Download the assistant dataset # !wget https://github.com/xliuhw/NLU-Evaluation-Data/archive/master.zip # !unzip master.zip -d {ASSISTANT_DIR} # - # Process virtual assistant intent and slot classification data # !python $NEMO_DIR/prompt_learning_assistant_preprocessing.py # !head -5 $ASSISTANT_DIR/assistant_train.jsonl # For the virtual assistant dataset, there are a set of 64 possible intents: # !echo 'Intents: ' $(wc -l < {ASSISTANT_DIR}/nemo-format/dict.intents.csv) # !cat {ASSISTANT_DIR}/nemo-format/dict.intents.csv # and 55 types of slots: # print all slots from the NeMo format slot dictionary # !echo 'Slots: ' $(wc -l < {ASSISTANT_DIR}/nemo-format/dict.slots.csv) # !cat {ASSISTANT_DIR}/nemo-format/dict.slots.csv # Each slot label consists of the slot type followed by specific text from the utterance corresponding to that slot type in parentheses. For example, the utterance `"tell my facebook group that i've arrived"` has the intent label `social_post` and the slot label `media_type(facebook)`. Utterances each have one intent label and zero or more slot labels. In cases where there is no slot label, our GPT model should predict the word `None`. # # Json objects for each training example contain three fields: `taskname`, `utterance`, and `label`. 
For this dataset, our preprocessing script formatted our intent and slot labels to look like `"\nIntent: transport_taxi\nSlots: transport_agency(golden taxi), time(seven pm), date(today)"`. With newline characters (\n) separating intent and slot labels. Our train jsonl file has `9960` training examples. Our validation and test jsonl files each have `538` training examples. Test examples do not have the `label` field. # # The preprocessing script can be found at `scripts/dataset_processing/nlp/intent_and_slot/prompt_learning_assistant_preprocessing.py` # # P-Tuning Model Config Setup # # Now we will begin setting up the config file used for prompt/p-tuning our GPT models! GPT Prompt learning within NeMo uses a class called `MegatronGPTPromptLearningModel` which has its own config file. We will start by loading an example prompt learning config file, then make changes to it to fit our tasks and training plans. # + from omegaconf import OmegaConf CONFIG_DIR = os.path.join(NEMO_DIR, "conf") os.makedirs(CONFIG_DIR, exist_ok=True) # Download the example config file wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/conf/megatron_gpt_prompt_learning_config.yaml', CONFIG_DIR) # Load the example config file so we can start editing it CONFIG_PATH = os.path.join(CONFIG_DIR, "megatron_gpt_prompt_learning_config.yaml") config = OmegaConf.load(CONFIG_PATH) # - # First let's set the datasets we've created in the config. We are going to start by p-tuning a GPT model on the **Sentiment Analysis** and **Intent and Slot Classification** tasks. We will set only those two datasets in the config file right now. 
config.model.data.train_ds = [f"{SENTIMENT_DIR}/financial_phrase_bank_train.jsonl", f"{ASSISTANT_DIR}/assistant_train.jsonl"] config.model.data.validation_ds = [f"{SENTIMENT_DIR}/financial_phrase_bank_val.jsonl", f"{ASSISTANT_DIR}/assistant_val.jsonl"] # The `MegatronGPTPromptLearningModel` class expects datasets to be a list of `.json` or `.jsonl` file paths. You can give it multiple datasets at once like we did above. # ### Prompt Formatting # Now that we have our datasets set, lets define what we want the prompt for each task to look like. # # To customize different prompts for different tasks, we simply need to specify the prompt task template in the config file. The virtual token markers `<|VIRTUAL_PROMPT_#|>` signify where you want virtual tokens to be placed in the template string. `<|VIRTUAL_PROMPT_0|>`, `<|VIRTUAL_PROMPT_1|>`, and `<|VIRTUAL_PROMPT_2|>` indicate where a number of virtual tokens matching the values given at `virtual_token_splits[0]`, `virtual_token_splits[1]` and `virtual_token_splits[2]` will be placed. The other variable fields `{var}` refer to the fields in the data json. # # For example, given: # # 1. the data json **{"sentence1": "And he said, Mama, I'm home.", "sentence2": "He didn't say a word."}** # # # 2. virtual token splits set to `virtual_token_splits = [3, 3, 3]` # # # 3. a prompt template set to `prompt_template = "<|VIRTUAL_PROMPT_0|> Hypothesis: [sentence1], <|VIRTUAL_PROMPT_1|> Premise: [sentence2] <|VIRTUAL_PROMPT_2|> Answer:"` # # the input will be translated into **<span style="color:red">VVV</span> Hypothesis: And he said, Mama, I'm home.<span style="color:red">VVV</span> Premise: He didn't say a word.<span style="color:red">VVV</span> Answer:**, where <span style="color:red">VVV</span> are three virtual tokens. # # Because we are only p-tuning on the Sentiment Analysis and Intent/Slot Classification tasks right now, we only need to set the task templates for those two tasks. 
But, we are going to go ahead and set the template for all 3 tasks here, just to show that you can set the templates for all the tasks you have planned at one time, then prompt tune/p-tune on them sequentially. # # Let's configure all of our templates for **Sentiment Analysis**, **Intent and Slot Classification**, and **Question Answering** tasks: # # + config.model.task_templates = [ { "taskname": "sentiment", "prompt_template": "<|VIRTUAL_PROMPT_0|> {sentence} sentiment:{label}", "total_virtual_tokens": 10, "virtual_token_splits": [10], "truncate_field": None, "answer_only_loss": True, "answer_field": "label", }, { "taskname": "intent_and_slot", "prompt_template": "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\n{utterance}{label}", "total_virtual_tokens": 10, "virtual_token_splits": [7, 3], "truncate_field": None, "answer_only_loss": False, }, { "taskname": "squad", "prompt_template": "<|VIRTUAL_PROMPT_0|> Context: {context}\n\nQuestion: {question}\n\nAnswer:{answer}", "total_virtual_tokens": 15, "virtual_token_splits": [15], "truncate_field": "context", "answer_only_loss": True, "answer_field": "answer", }, ] # - # Note each `task_template` item has 5 fields. # # - **`prompt_template`** is a string showing the model where to place virtual tokens and how to map dataset json fields to where they belong in the model prompt. # # # - **`taskname`** refers to the same `taskname` in the dataset json objects. # # # - **`total_virtual_tokens`** specifies the total number of virtual tokens that will be inserted into the model prompt. # # # - **`virtual_token_splits`** specifies the number of virtual tokens that belong at each `<|VIRTUAL_PROMPT_#|>` marker. `virtual_token_splits` values should add up to `total_virtual_tokens`. The number of `virtual_token_splits` should match the number of `<|VIRTUAL_PROMPT_#|>` markers. 
# # # - **`truncate_field`** specifies which field in the data json to truncate if the length of the input exceeds the maximum sequence length of the model. If `truncate_field` is set to `None`, examples that are too long are simply dropped from the dataset. # # # - **`answer_only_loss`** Whether to limit loss calculation to only the answer portion of the prompt during tuning. `True` Strongly recommended for long prompts, but shorter prompts with single word answers seem to benefit from setting this to `False`. # # # - **`answer_field`** The field in the data json corresponding to the answer. The loss will only be calculated on this portion of the prompt if `answer_only_loss` is `True`. The answer field must be at the end of the prompt template. # # In the `task_templates` we set above, `squad` has a different number of virtual tokens than `sentiment` and `intent_and_slot`. This is because we will be p-tuning on `squad` after we p-tune on the other two tasks and **we do not need to use the same number of virtual tokens between sessions**. We also set the `truncate` field for squad because the context can sometimes be longer than the model's max sequence length, and we want that field to be truncated if the example is too long. Lastly, we set `answer_only_loss` to true for `squad` due to the longer prompt. We've found `answer_only_loss=True` to work significantly better for this task. # ### Setting New Tasks # After you p-tune your model this time, you can always go back and p-tune or prompt-tune your model on more tasks without over writing the virtual prompts who've trained this time. You can also use a different number of `total_virtual_tokens` between each training session as long as tasks ptuned or prompt tuned at the same time have the same number of `total_virtual_tokens`. For this reason, when you p-tune on a new task, you need to tell your model which of your tasks are new and which ones already exist (and thus you don't want to tune them). 
# # You do this by setting the `new_tasks` and `existing_tasks` values in the config file. Because we are p-tuning a model with no existing tasks, you should set `existing_tasks=[]` and `new_tasks=["sentiment", "intent_and_slot"]` as follows: config.model.existing_tasks = [] config.model.new_tasks = ["sentiment", "intent_and_slot"] # After p-tuning and/or prompt tuning is complete, you can run inference on all tasks at the same time, regardless of their `total_virtual_tokens` value. # ### Setting The Pre-Trained GPT Model # We still need to set which GPT model we want to p-tune/prompt tune. Prompt learning methods work best with large GPT language models (5B or above), but the purposes of this tutorial, we are going to download a 345M parameter GPT model from NVIDIA NGC. # Check what GPT .nemo models we have available on NGC from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel MegatronGPTModel.list_available_models() # If we wanted to use the GPT model class directly, we could instantiate a trainer then download the model by calling running # `gpt_model = MegatronGPTModel.from_pretrained(model_name="megatron_gpt_345m", trainer=trainer).cuda()`. But we just need the `.nemo` file in our working NeMo directory in this tutorial, so we will download it using `wget`. # Download the model from NGC gpt_file_name = "megatron_gpt_345m.nemo" # !wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/nemo/megatron_gpt_345m/versions/1/files/megatron_gpt_345m.nemo -O {NEMO_DIR}/{gpt_file_name} # Now that we have a `.nemo` GPT file to work with. We need to add its path in our prompt learning config. # Set GPT model path on prompt learning config config.model.language_model_path = gpt_file_name # We can also set where we want the final prompt tuned model to be saved by setting `model.nemo_path`. 
By default the tuned prompt learning model will be saved in your current working directory to a `.nemo` file with the same name as your experiment (`config.name`). Let's change the save name to be `multitask_p_tuned_gpt.nemo`. **Your model path must end in `.nemo`.** config.model.nemo_path = "multitask_p_tuned_gpt.nemo" # ### Setting P-Tuning Specific Params # Within the config file, p-tuning and prompt-tuning each have a couple of hyperparameters specific to them. We first need to tell the model that we want to do p-tuning, not prompt-tuning. To do this, we set the **`model.virtual_prompt_style`** hyperparameter like this: from nemo.collections.nlp.modules.common import VirtualPromptStyle config.model.virtual_prompt_style = VirtualPromptStyle.P_TUNING # Then we can set the 2 p-tuning specific parameters. Reminder, p-tuning uses an LSTM prompt encoder to predict virtual tokens. # # - **`p_tuning.dropout`** the LSTM prompt encoder dropout probability # - **`p_tuning.num_layers`** the number of LSTM layers you want your p-tuning prompt encoder to have # config.model.p_tuning.dropout = 0.0 config.model.p_tuning.num_layers = 2 # Let's have a look at all the values we've set in the model config. You can change any of these values in the same manner we've been using above. # Final model config print(OmegaConf.to_yaml(config.model)) # ### Setting Prompt-Tuning Specific Params # # Though we are not using prompt tuning in this training session, let's go over the prompt tuning specific parameters we would use if we were. # # - **`prompt_tuning.new_prompt_init_methods`** Whether you want to initialize virtual token embeddings from the embeddings of existing parts of the model's vocabulary (either 'text' or 'random') # - **`prompt_tuning.new_prompt_init_text`** The text you want to use if you have 'text' in the list above, should be None otherwise. # # Each of the above hyperparameters are a list of strings. 
# # `new_prompt_init_methods` would look like `["text", "random", "text", "text"]` if you were prompt tuning on 4 tasks at once, and you wanted the second task in `new_tasks` to use random initialization. # # `new_prompt_init_text` might look like `["some text I want to use", None, "some other text", "task text goes here"]` for those four new tasks. # # The order of both should correspond to the order of the tasks you have listed in `model.new_tasks`. # # Building the PyTorch Lightning Trainer # NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem. # # Let's first instantiate a Trainer object # + import torch import pytorch_lightning as pl from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin from pytorch_lightning.plugins.environments.torchelastic_environment import TorchElasticEnvironment # lets modify some trainer configs # checks if we have GPU available and uses it accelerator = 'gpu' if torch.cuda.is_available() else 'cpu' config.trainer.accelerator = accelerator config.trainer.devices = 1 config.trainer.max_epochs = 10 config.trainer.val_check_interval = 1.0 # for PyTorch Native AMP set precision=16 config.trainer.precision = 16 if torch.cuda.is_available() else 32 # remove distributed training flags config.trainer.strategy = None # setup cluster environment parameters" # use torch elastic cluster environment so `create_process_externally` is True # the launcher is set to None. It will not try to spawn new processes. 
# It won't create the misconfiguration error because of the `interactive session` os.environ["LOCAL_RANK"] = '0' os.environ["RANK"] = '0' os.environ["WORLD_SIZE"] = '1' plugins = [NLPDDPPlugin(find_unused_parameters=False), TorchElasticEnvironment()] trainer = pl.Trainer(plugins=plugins, **config.trainer) print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # - # # Setting up a NeMo Experiment # # NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it: # + from nemo.utils.exp_manager import exp_manager # Set name of the experiment config.name = 'sentiment_intent_slot_p_tuning' config.exp_manager.resume_if_exists = False # Init the experiment manager and view the exp_dir exp_dir = exp_manager(trainer, config.get("exp_manager", None)) exp_dir = str(exp_dir) print(exp_dir) # - # We can also set learning hyperparameters as follows: # Set some of the learning parameters config.model.optim.lr = 1e-4 config.model.batch_size = 16 # # First P-Tuning Session # The only thing left to do is load up the model and begin p-tuning! # + from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import MegatronGPTPromptLearningModel model = MegatronGPTPromptLearningModel(cfg=config.model, trainer=trainer) # - # Training set to 10 epochs by default in a cell above # Each epoch will take around 1min 15sec, but training time can vary trainer.fit(model) # When training completes, p-tuned virtual tokens from the prompt encoder are automatically moved to a `prompt_table` where all prompt tuned and p-tuned soft prompts are stored. The LSTM `prompt_encoder` is then removed from the model. This allows us to preserve previously p-tuned soft prompts while still maintaining the ability to add new p-tuned or prompt-tuned soft prompts in the future. The `prompt_table` uses the `taskname` as a key to look up the correct virtual tokens for a specified task. 
# # Inference After First P-Tuning Session # One way to run inference after p-tuning or prompt-tuning your model is to call `model.generate()`. `model.generate()` takes in # # - `inputs` which can be either a list of dictionary objects or `.jsonl` files containing dictionary objects, # - `length_params` # - `sampling_params` # # as arguments. More information about the [text generation API can be found here](https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/transformer/text_generation.py). # # If `length_params` and `sampling_params` are set to `None`, the model generates output with a greedy decoding strategy and generates up to `30` new tokens. Most predictive downstream tasks (not text generation tasks), use greedy sampling. To see other ways to run inference with your prompt learning model and more details on how to define various inference parameters, visit `examples/nlp/language_modeling/megatron_gpt_eval.py`. # # Below are some randomly selected test examples from the sentiment classification and intent and slot classification test files. Notice that the `label` field is dropped from all test examples. The `MegatronPromptLearningDataset` called within `.generate()` automatically leaves fields in the prompt template empty when they are not provided in the data json. 
test_examples = [ {"taskname": "intent_and_slot", "utterance": "tell me who will win the next presidential election"}, {"taskname": "intent_and_slot", "utterance": "i would like to pickup a veggie sub with a cookie from subway"}, {"taskname": "intent_and_slot", "utterance": "email happy new year to john"}, {"taskname": "intent_and_slot", "utterance": "set the alarm to seven am for work"}, {"taskname": "sentiment", "sentence": "The products have a low salt and fat content ."}, {"taskname": "sentiment", "sentence": "The agreement is valid for four years ."}, {"taskname": "sentiment", "sentence": "Diluted EPS rose to EUR3 .68 from EUR0 .50 ."}, {"taskname": "sentiment", "sentence": "The company is well positioned in Brazil and Uruguay ."}, {"taskname": "sentiment", "sentence": "Profit before taxes decreased by 9 % to EUR 187.8 mn in the first nine months of 2008 , compared to EUR 207.1 mn a year earlier ."}, ] # This allows us to prompt the p-tuned GPT model as follows: # # ``` # "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\nplease will you check it." # "<|VIRTUAL_PROMPT_0|> Predict intent and slot <|VIRTUAL_PROMPT_1|> :\nset the alarm to seven am for work" # ------------------ # "<|VIRTUAL_PROMPT_0|> The products have a low salt and fat content . sentiment:" # "<|VIRTUAL_PROMPT_0|> The agreement is valid for four years . sentiment:" # # ``` # # With the correct virtual tokens inserted at each `<|VIRTUAL_PROMPT_#|>` # + response = model.generate(inputs=test_examples, length_params=None) print('The prediction results of some sample queries with the trained model:') for result in response['sentences']: print(result) print("-" * 30) # - # # Adding a New Task to a Previously Tuned Model # Now that we've p-tuned our GPT model on intent/slot classification and sentiment analysis, lets add SQuAD question answering using p-tuning! First we need to update the config for the new task. # # # Updating The Model Config # We need to update: # # 1. 
`name` # 2. `model.restore_path` # 3. `model.existing_tasks` # 4. `model.new_tasks` # 5. `model.data.train_ds` # 6. `model.data.validation_ds` # # Remember that we already set `task_templates` for SQuAD when we were defining the task template for the other two tasks. We would add it here if we had not already set it above. # Here we tell the config that we want to **load the previously p-tuned model and add new tasks to it.** # + # Change the experiment name config.name = 'squad_p_tuning' # Change restore path from null to the p-tuned model we just finished training config.model.restore_path = "multitask_p_tuned_gpt.nemo" # Move the tasks you just p-tuned your model on to existing tasks, and add squad to the new task list config.model.existing_tasks = ["sentiment", "intent_and_slot"] config.model.new_tasks = ["squad"] # - # Update the dataset list to squad train and val # Using a subset of the training data for the sake of time config.model.data.train_ds = [f"{SQUAD_DIR}/squad_short_train.jsonl"] config.model.data.validation_ds = [f"{SQUAD_DIR}/squad_val.jsonl"] # # Second P-Tuning Session # + # Reset some model trainer and training params config.trainer.max_epochs = 1 config.trainer.val_check_interval = 1000 # Limiting the number of validation batches for sake of time config.trainer.limit_val_batches = 100 config.model.optim.lr = 5e-4 config.model.optim.sched.min_lr = 1e-5 config.model.batch_size = 4 # Reset the trainer plugins = [NLPDDPPlugin(find_unused_parameters=False), TorchElasticEnvironment()] trainer = pl.Trainer(plugins=plugins, **config.trainer) print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # - # Reset experiment manager exp_dir = exp_manager(trainer, config.get("exp_manager", None)) exp_dir = str(exp_dir) print(exp_dir) # Restore previously tuned model with updated config model = MegatronGPTPromptLearningModel.restore_from(config.model.restore_path, config.model, trainer=trainer) # Prompt tune your model on squad # This will take
around 10 min per epoch, timing is variable trainer.fit(model) # # Inference After Second P-Tuning Session # # Now we can run inference on all 3 tasks at once. The answers for the intent/slot and sentiment tasks should be identical to the ones from before p-tuning on squad. #Test examples with squad examples added test_examples = [ {"taskname": "intent_and_slot", "utterance": "tell me who will win the next presidential election"}, {"taskname": "intent_and_slot", "utterance": "i would like to pickup a veggie sub with a cookie from subway"}, {"taskname": "intent_and_slot", "utterance": "email happy new year to john"}, {"taskname": "intent_and_slot", "utterance": "set the alarm to seven am for work"}, {"taskname": "sentiment", "sentence": "The products have a low salt and fat content ."}, {"taskname": "sentiment", "sentence": "The agreement is valid for four years ."}, {"taskname": "sentiment", "sentence": "Diluted EPS rose to EUR3 .68 from EUR0 .50 ."}, {"taskname": "sentiment", "sentence": "The company is well positioned in Brazil and Uruguay ."}, {"taskname": "sentiment", "sentence": "Profit before taxes decreased by 9 % to EUR 187.8 mn in the first nine months of 2008 , compared to EUR 207.1 mn a year earlier ."}, {"taskname": "squad", "context": "The build was released for download later in the day in standard 32-bit and 64-bit versions, plus a special 64-bit version which included SDKs and developer tools (Visual Studio Express and Expression Blend) for developing Metro-style apps. The Windows Store was announced during the presentation, but was not available in this build. According to Microsoft, there were about 535,000 downloads of the developer preview within the first 12 hours of its release. 
Originally set to expire on March 11, 2012, in February 2012 the Developer Preview's expiry date was changed to January 15, 2013.", "question": "When was the Developer preview initially intended to expire?"}, {"taskname": "squad", "context": "The structures of most federal governments incorporate mechanisms to protect the rights of component states. One method, known as 'intrastate federalism', is to directly represent the governments of component states in federal political institutions. Where a federation has a bicameral legislature the upper house is often used to represent the component states while the lower house represents the people of the nation as a whole. A federal upper house may be based on a special scheme of apportionment, as is the case in the senates of the United States and Australia, where each state is represented by an equal number of senators irrespective of the size of its population.", "question": "What is a bicameral legislature?"}, {"taskname": "squad", "context": "Imported mystery religions, which offered initiates salvation in the afterlife, were a matter of personal choice for an individual, practiced in addition to carrying on one's family rites and participating in public religion. The mysteries, however, involved exclusive oaths and secrecy, conditions that conservative Romans viewed with suspicion as characteristic of \"magic\", conspiratorial (coniuratio), or subversive activity. 
Sporadic and sometimes brutal attempts were made to suppress religionists who seemed to threaten traditional morality and unity, as with the senate's efforts to restrict the Bacchanals in 186 BC.", "question": "What was the practice of religion to the Romans?"} ] # + response = model.generate(inputs=test_examples, length_params=None) print('The prediction results of some sample queries with the trained model:') for result in response['sentences']: print(result) print("-" * 30) # - # For squad, remember we only trained our model on ~29% of the training examples (20k instead of ~70k) and for only 1 epoch. Results will improve if the full training set is used and the model is tuned for more training steps. # # This concludes our tutorial! For command line and script usage demos, [please see our docs](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/prompt_learning.html)
tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Preparing stellar population models # # Paintbox is particularly designed to model the observed spectrum, i.e., # the flux from stars as a function of the wavelength, of galaxies where # stars are not individually resolved. In these cases, the most important # ingredient to describe the stellar features in observations are stellar # population models, which describe the properties of an ensemble of stars # with different properties, e.g., ages, metallicities, etc. Several groups of # astronomers distribute their models for free for users, either publicly # or under request. However, these models are distributed in different # ways, and we have to deal with the input models accordingly before # their use with **paintbox**. The examples below indicate how to deal with # some popular stellar population models. # ### Using EMILES stellar population models # # Stellar population models from the MILES library can be obtained in a variety of ways in their [website](http://research.iac.es/proyecto/miles//pages/stellar-libraries/miles-library.php). For this example, we will use the packages [astropy](https://www.astropy.org) to handle FITS files and tables, and [pPXF](https://pypi.org/project/ppxf/) for rebinning the data to a logarithmic scale. # + import os import numpy as np from astropy.io import fits from astropy.table import Table from ppxf import ppxf_util # - # For this example, we will use a set of single stellar population (SSP) templates of the E-MILES models (version 11) produced with BASTI isochrones and assuming a Chabrier initial mass function, which can be downloaded in a tarball from their public ftp link available [in their website](http://miles.iac.es/) (EMILES_BASTI_BASE_CH_FITS.tar.gz, 95 Mb).
After downloading the data, it is necessary to unpack the tarfile (preferentially into a subdirectory, which we name emiles_v11), containing the 636 SSP spectra in this case. emiles_dir = "/home/kadu/Dropbox/SSPs/emiles_v11" w1 = 2600 # Minimum wavelength w2 = 10000 # Maximum wavelength # We can use the [MILES name convention](http://research.iac.es/proyecto/miles/pages/ssp-models/name-convention.php) to read the files with the models. def miles_filename(specrange, imf, imfslope, metal, age): """ Returns the name of a fits file in the MILES library according to the name convention. """ msign = "p" if metal >= 0. else "m" azero = "0" if age < 10. else "" return "{0}{1}{2:.2f}Z{3}{4:.2f}T{5}{6:02.4f}" \ "_iTp0.00_baseFe.fits".format(specrange, imf, \ imfslope, msign, abs(metal), azero, age) # Below we produce a list containing all the spectra that we are going to use in our analysis (filenames), and we also produce an astropy [Table object](https://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table) storing the parameters of the files. specrange = "E" # options: "E", "M", "B", "R", "C" imf = "ch" # options: "un", "bi", "ku", "kb", "ch" imfslope = 1.3 # Values of metallicities and ages available for BASTI isochrones Zs = np.array([-0.96, -0.66, -0.35, -0.25, 0.06, 0.15, 0.26, 0.4]) Ts = np.linspace(1., 14., 27) # Using only ages > 1 Gyr ssps_grid = np.array(np.meshgrid(Ts, Zs)).T.reshape(-1, 2) nssps = len(ssps_grid) filenames = [] for t, z in ssps_grid: filenames.append(miles_filename(specrange, imf, imfslope, z, t)) params = Table(ssps_grid, names=["T", "Z"]) # We use the information in the header of one spectrum to determine the wavelength range (which is always the same for a given set of models). Notice that the wavelength range covered by the EMILES models is large (from the near-UV to the IR). 
h = fits.getheader(os.path.join(emiles_dir, filenames[0])) wave = (h['CRVAL1'] + h['CDELT1'] * (np.arange((h['NAXIS1'])) + 1 - h['CRPIX1'])) # Finally, we need to trim and/or rebin the model spectra. We need to trim the data to cover only the spectral region of the observed data. Notice, however, that we should always have extra coverage in the models, preferentially on both edges of the spectra, if we are also modeling the kinematics of the galaxy, and also to avoid problems at the edges of the models owing to convolutions (below I use 500 Angstrom, but this can be optimized for a galaxy according to their redshift). We may also rebin the data, either to have the same wavelength dispersion of the observations, or to a logarithmic scale to model the kinematics. We use the pPXF for this purpose, assuming a velocity scale for the rebinning of 200 km/s. # + velscale = 200 extra_wave = 500 idx = np.where((wave >= w1 - extra_wave) & (wave <= w2 + extra_wave)) wave = wave[idx] # Trimming wavelength array # Using first spectrum to get array size after rebinning flux = fits.getdata(os.path.join(emiles_dir, filenames[0]))[idx] wrange = [wave[0], wave[-1]] newflux, logLam, velscale = ppxf_util.log_rebin(wrange, flux, velscale=velscale) # Loop to trim and rebin spectra ssps = np.zeros((nssps, len(newflux))) for i, filename in enumerate(filenames): flambda = fits.getdata(os.path.join(emiles_dir, filename))[idx] flux_log = ppxf_util.log_rebin(wrange, flambda, velscale=velscale)[0] ssps[i] = flux_log # - # Now, we just need to store the processed data into a FITS file.
hdu1 = fits.PrimaryHDU(ssps) hdu1.header["EXTNAME"] = "SSPS" hdu2 = fits.BinTableHDU(params) hdu2.header["EXTNAME"] = "PARAMS" hdu3 = fits.BinTableHDU(Table([logLam], names=["loglam"])) hdu3.header["EXTNAME"] = "WAVE" hdulist = fits.HDUList([hdu1, hdu2, hdu3]) output = "emiles_chabrier_w{}_{}_vel{}.fits".format(w1, w2, velscale) hdulist.writeto(output, overwrite=True) # In this particular example, we will obtain a multi-extension FITS file named "emiles_chabrier_w2600_10000_vel200.fits", which contains the 2D array with the models, a parameter table, and a 1D array with the wavelength array. Notice that, in practice, it is often necessary to degrade the model spectra to match the resolution of the observations, which can be performed with the task paintbox.utils.broad2res. # ### Using CvD stellar population models # # Models from the [Conroy & van Dokkum (2012)](https://ui.adsabs.harvard.edu/abs/2012ApJ...747...69C/abstract) and [Conroy et al. (2018)](https://ui.adsabs.harvard.edu/abs/2018ApJ...854..139C/abstract), a.k.a. CvD models, can be obtained under request to the authors. Similar to the MILES models, CvD are also distributed as SSP models with varying ages, metallicities, and IMFs, but also provide response functions that allow the variation of several individual elements, e.g., C, N, O, Mg, Si, Ca, Ti, and Fe. To handle these models, we use the utility class `CvD18`, built from the basic `paintbox` classes, to handle the input files and produce spectra with any combination of parameters.
# + import os import glob import numpy as np from paintbox.utils import CvD18, disp2vel import matplotlib.pyplot as plt # Defining an arbitrary wavelength region in the near IR w1, w2 = 8000, 13000 # Setting the wavelength window sigma = 300 # Velocity dispersion of the output models wave = disp2vel([w1, w2], sigma) outdir = os.path.join(os.getcwd(), "CvD18_tutorials") ssp = CvD18(wave, sigma=sigma, outdir=outdir) # - # The code needs to prepare the templates the first time an execution is performed, but the models can be saved to disk (with the option store=True) for quick loading after the first time they are called. The result is an object that can be easily used to call CvD models. # Checking the parameter names print(ssp.parnames) # See all the options and methods available in the documentation of CvD18 for more details.
tutorials/preparing_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.0 64-bit
#     metadata:
#       interpreter:
#         hash: cf85b414d3663472de89104473c842eaab37d7b845999caf56a47ccda76ea2f8
#     name: python3
# ---

# # US - Baby Names

# ### Introduction:
#
# We are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle.
# In the file it will be names from 2004 until 2014
#
#
# ### Step 1. Import the necessary libraries

import pandas as pd
import numpy as np

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv).

url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv'

# ### Step 3. Assign it to a variable called baby_names.

baby_names = pd.read_csv(url, sep=',')
print(baby_names)

baby_names.info()

# Work on a copy so the raw download stays untouched.
df_baby_names = baby_names.copy()

# ### Step 4. See the first 10 entries

df_baby_names.head(10)

# ### Step 5. Delete the column 'Unnamed: 0' and 'Id'

# The first two columns are the stray CSV index and the row Id.
df_baby_names.drop(df_baby_names.columns.values[:2], axis=1, inplace=True)
df_baby_names.head(5)

# ### Step 6. Are there more male or female names in the dataset?

# +
F = 0
M = 0
for val in df_baby_names['Gender']:
    if val == 'F':
        F += 1
    elif val == 'M':
        M += 1
print(F, M)
# -

# Same answer without the manual loop:
df_baby_names.groupby('Gender').count().Count

# ### Step 7. Group the dataset by name and assign to names

# Sum the yearly rows per name, keeping only the aggregated 'Count' column.
names = df_baby_names.groupby('Name').sum().loc[:, 'Count':'Count']
names

# ### Step 8. How many different names exist in the dataset?

len(names)

# ### Step 9. What is the name with most occurrences?

# Fixed: `df_baby_names.max('Count')` passed a column label where
# `DataFrame.max` expects an axis, raising "No axis named Count".
# `idxmax` on the aggregated counts returns the most frequent name.
names['Count'].idxmax()

# ### Step 10. How many different names have the least occurrences?

names[names == names['Count'].min()].count()

# ### Step 11. What is the median name occurrence?

names.median()

# ### Step 12. What is the standard deviation of names?

names.std()

# ### Step 13. Get a summary with the mean, min, max, std and quartiles.

names.describe()
2_Ejercicios/Primera_parte/Entregables/2. Ejercios_Mod1/2020_12_18/6_Stats/US_Baby_Names/US_baby_names.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!--BOOK_INFORMATION--> # <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> # *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* # # *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* # <!--NAVIGATION--> # < [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb) | [Contents](Index.ipynb) | [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) > # # In Depth: Linear Regression # Just as naive Bayes (discussed earlier in [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb)) is a good starting point for classification tasks, linear regression models are a good starting point for regression tasks. # Such models are popular because they can be fit very quickly, and are very interpretable. # You are probably familiar with the simplest form of a linear regression model (i.e., fitting a straight line to data) but such models can be extended to model more complicated data behavior. # # In this section we will start with a quick intuitive walk-through of the mathematics behind this well-known problem, before moving on to see how linear models can be generalized to account for more complicated patterns in data.
# # We begin with the standard imports: # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np # ## Simple Linear Regression # # We will start with the most familiar linear regression, a straight-line fit to data. # A straight-line fit is a model of the form # $$ # y = ax + b # $$ # where $a$ is commonly known as the *slope*, and $b$ is commonly known as the *intercept*. # # Consider the following data, which is scattered about a line with a slope of 2 and an intercept of -5: rng = np.random.RandomState(1) x = 10 * rng.rand(50) y = 2 * x - 5 + rng.randn(50) plt.scatter(x, y); # We can use Scikit-Learn's ``LinearRegression`` estimator to fit this data and construct the best-fit line: # + from sklearn.linear_model import LinearRegression model = LinearRegression(fit_intercept=True) model.fit(x[:, np.newaxis], y) xfit = np.linspace(0, 10, 1000) yfit = model.predict(xfit[:, np.newaxis]) plt.scatter(x, y) plt.plot(xfit, yfit); # - # The slope and intercept of the data are contained in the model's fit parameters, which in Scikit-Learn are always marked by a trailing underscore. # Here the relevant parameters are ``coef_`` and ``intercept_``: print("Model slope: ", model.coef_[0]) print("Model intercept:", model.intercept_) # We see that the results are very close to the inputs, as we might hope. # The ``LinearRegression`` estimator is much more capable than this, however—in addition to simple straight-line fits, it can also handle multidimensional linear models of the form # $$ # y = a_0 + a_1 x_1 + a_2 x_2 + \cdots # $$ # where there are multiple $x$ values. # Geometrically, this is akin to fitting a plane to points in three dimensions, or fitting a hyper-plane to points in higher dimensions. 
# # The multidimensional nature of such regressions makes them more difficult to visualize, but we can see one of these fits in action by building some example data, using NumPy's matrix multiplication operator: # + rng = np.random.RandomState(1) X = 10 * rng.rand(100, 3) y = 0.5 + np.dot(X, [1.5, -2., 1.]) model.fit(X, y) print(model.intercept_) print(model.coef_) # - # Here the $y$ data is constructed from three random $x$ values, and the linear regression recovers the coefficients used to construct the data. # # In this way, we can use the single ``LinearRegression`` estimator to fit lines, planes, or hyperplanes to our data. # It still appears that this approach would be limited to strictly linear relationships between variables, but it turns out we can relax this as well. # ## Basis Function Regression # # One trick you can use to adapt linear regression to nonlinear relationships between variables is to transform the data according to *basis functions*. # We have seen one version of this before, in the ``PolynomialRegression`` pipeline used in [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb) and [Feature Engineering](05.04-Feature-Engineering.ipynb). # The idea is to take our multidimensional linear model: # $$ # y = a_0 + a_1 x_1 + a_2 x_2 + a_3 x_3 + \cdots # $$ # and build the $x_1, x_2, x_3,$ and so on, from our single-dimensional input $x$. # That is, we let $x_n = f_n(x)$, where $f_n()$ is some function that transforms our data. # # For example, if $f_n(x) = x^n$, our model becomes a polynomial regression: # $$ # y = a_0 + a_1 x + a_2 x^2 + a_3 x^3 + \cdots # $$ # Notice that this is *still a linear model*—the linearity refers to the fact that the coefficients $a_n$ never multiply or divide each other. # What we have effectively done is taken our one-dimensional $x$ values and projected them into a higher dimension, so that a linear fit can fit more complicated relationships between $x$ and $y$. 
# ### Polynomial basis functions # # This polynomial projection is useful enough that it is built into Scikit-Learn, using the ``PolynomialFeatures`` transformer: from sklearn.preprocessing import PolynomialFeatures x = np.array([2, 3, 4]) poly = PolynomialFeatures(3, include_bias=False) poly.fit_transform(x[:, None]) # We see here that the transformer has converted our one-dimensional array into a three-dimensional array by taking the exponent of each value. # This new, higher-dimensional data representation can then be plugged into a linear regression. # # As we saw in [Feature Engineering](05.04-Feature-Engineering.ipynb), the cleanest way to accomplish this is to use a pipeline. # Let's make a 7th-degree polynomial model in this way: from sklearn.pipeline import make_pipeline poly_model = make_pipeline(PolynomialFeatures(7), LinearRegression()) # With this transform in place, we can use the linear model to fit much more complicated relationships between $x$ and $y$. # For example, here is a sine wave with noise: # + rng = np.random.RandomState(1) x = 10 * rng.rand(50) y = np.sin(x) + 0.1 * rng.randn(50) poly_model.fit(x[:, np.newaxis], y) yfit = poly_model.predict(xfit[:, np.newaxis]) plt.scatter(x, y) plt.plot(xfit, yfit); # - # Our linear model, through the use of 7th-order polynomial basis functions, can provide an excellent fit to this non-linear data! # ### Gaussian basis functions # # Of course, other basis functions are possible. # For example, one useful pattern is to fit a model that is not a sum of polynomial bases, but a sum of Gaussian bases. # The result might look something like the following figure: # ![](figures/05.06-gaussian-basis.png) # [figure source in Appendix](#Gaussian-Basis) # The shaded regions in the plot are the scaled basis functions, and when added together they reproduce the smooth curve through the data. 
# These Gaussian basis functions are not built into Scikit-Learn, but we can write a custom transformer that will create them, as shown here and illustrated in the following figure (Scikit-Learn transformers are implemented as Python classes; reading Scikit-Learn's source is a good way to see how they can be created): # + from sklearn.base import BaseEstimator, TransformerMixin class GaussianFeatures(BaseEstimator, TransformerMixin): """Uniformly spaced Gaussian features for one-dimensional input""" def __init__(self, N, width_factor=2.0): self.N = N self.width_factor = width_factor @staticmethod def _gauss_basis(x, y, width, axis=None): arg = (x - y) / width return np.exp(-0.5 * np.sum(arg ** 2, axis)) def fit(self, X, y=None): # create N centers spread along the data range self.centers_ = np.linspace(X.min(), X.max(), self.N) self.width_ = self.width_factor * (self.centers_[1] - self.centers_[0]) return self def transform(self, X): return self._gauss_basis(X[:, :, np.newaxis], self.centers_, self.width_, axis=1) gauss_model = make_pipeline(GaussianFeatures(20), LinearRegression()) gauss_model.fit(x[:, np.newaxis], y) yfit = gauss_model.predict(xfit[:, np.newaxis]) plt.scatter(x, y) plt.plot(xfit, yfit) plt.xlim(0, 10); # - # We put this example here just to make clear that there is nothing magic about polynomial basis functions: if you have some sort of intuition into the generating process of your data that makes you think one basis or another might be appropriate, you can use them as well. # ## Regularization # # The introduction of basis functions into our linear regression makes the model much more flexible, but it also can very quickly lead to over-fitting (refer back to [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb) for a discussion of this). 
# For example, if we choose too many Gaussian basis functions, we end up with results that don't look so good: # + model = make_pipeline(GaussianFeatures(30), LinearRegression()) model.fit(x[:, np.newaxis], y) plt.scatter(x, y) plt.plot(xfit, model.predict(xfit[:, np.newaxis])) plt.xlim(0, 10) plt.ylim(-1.5, 1.5); # - # With the data projected to the 30-dimensional basis, the model has far too much flexibility and goes to extreme values between locations where it is constrained by data. # We can see the reason for this if we plot the coefficients of the Gaussian bases with respect to their locations: # + def basis_plot(model, title=None): fig, ax = plt.subplots(2, sharex=True) model.fit(x[:, np.newaxis], y) ax[0].scatter(x, y) ax[0].plot(xfit, model.predict(xfit[:, np.newaxis])) ax[0].set(xlabel='x', ylabel='y', ylim=(-1.5, 1.5)) if title: ax[0].set_title(title) ax[1].plot(model.steps[0][1].centers_, model.steps[1][1].coef_) ax[1].set(xlabel='basis location', ylabel='coefficient', xlim=(0, 10)) model = make_pipeline(GaussianFeatures(30), LinearRegression()) basis_plot(model) # - # The lower panel of this figure shows the amplitude of the basis function at each location. # This is typical over-fitting behavior when basis functions overlap: the coefficients of adjacent basis functions blow up and cancel each other out. # We know that such behavior is problematic, and it would be nice if we could limit such spikes explicitly in the model by penalizing large values of the model parameters. # Such a penalty is known as *regularization*, and comes in several forms. # ### Ridge regression ($L_2$ Regularization) # # Perhaps the most common form of regularization is known as *ridge regression* or $L_2$ *regularization*, sometimes also called *Tikhonov regularization*.
# This proceeds by penalizing the sum of squares (2-norms) of the model coefficients; in this case, the penalty on the model fit would be # $$ # P = \alpha\sum_{n=1}^N \theta_n^2 # $$ # where $\alpha$ is a free parameter that controls the strength of the penalty. # This type of penalized model is built into Scikit-Learn with the ``Ridge`` estimator: from sklearn.linear_model import Ridge model = make_pipeline(GaussianFeatures(30), Ridge(alpha=0.1)) basis_plot(model, title='Ridge Regression') # The $\alpha$ parameter is essentially a knob controlling the complexity of the resulting model. # In the limit $\alpha \to 0$, we recover the standard linear regression result; in the limit $\alpha \to \infty$, all model responses will be suppressed. # One advantage of ridge regression in particular is that it can be computed very efficiently—at hardly more computational cost than the original linear regression model. # ### Lasso regression ($L_1$ regularization) # # Another very common type of regularization is known as lasso, and involves penalizing the sum of absolute values (1-norms) of regression coefficients: # $$ # P = \alpha\sum_{n=1}^N |\theta_n| # $$ # Though this is conceptually very similar to ridge regression, the results can differ surprisingly: for example, due to geometric reasons lasso regression tends to favor *sparse models* where possible: that is, it preferentially sets model coefficients to exactly zero. # # We can see this behavior in duplicating the ridge regression figure, but using L1-normalized coefficients: from sklearn.linear_model import Lasso model = make_pipeline(GaussianFeatures(30), Lasso(alpha=0.001)) basis_plot(model, title='Lasso Regression') # With the lasso regression penalty, the majority of the coefficients are exactly zero, with the functional behavior being modeled by a small subset of the available basis functions. 
# As with ridge regularization, the $\alpha$ parameter tunes the strength of the penalty, and should be determined via, for example, cross-validation (refer back to [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb) for a discussion of this). # ## Example: Predicting Bicycle Traffic # As an example, let's take a look at whether we can predict the number of bicycle trips across Seattle's Fremont Bridge based on weather, season, and other factors. # We have seen this data already in [Working With Time Series](03.11-Working-with-Time-Series.ipynb). # # In this section, we will join the bike data with another dataset, and try to determine the extent to which weather and seasonal factors—temperature, precipitation, and daylight hours—affect the volume of bicycle traffic through this corridor. # Fortunately, the NOAA makes available their daily [weather station data](http://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND) (I used station ID USW00024233) and we can easily use Pandas to join the two data sources. # We will perform a simple linear regression to relate weather and other information to bicycle counts, in order to estimate how a change in any one of these parameters affects the number of riders on a given day. # # In particular, this is an example of how the tools of Scikit-Learn can be used in a statistical modeling framework, in which the parameters of the model are assumed to have interpretable meaning. # As discussed previously, this is not a standard approach within machine learning, but such interpretation is possible for some models. 
# # Let's start by loading the two datasets, indexing by date:

# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD

import pandas as pd
counts = pd.read_csv('FremontBridge.csv', index_col='Date', parse_dates=True)
weather = pd.read_csv('data/BicycleWeather.csv', index_col='DATE', parse_dates=True)

# Next we will compute the total daily bicycle traffic, and put this in its own dataframe:

daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']]  # remove other columns

# We saw previously that the patterns of use generally vary from day to day; let's account for this in our data by adding binary columns that indicate the day of the week:

# One 0/1 indicator column per weekday; these act as day-specific
# intercepts in the regression fitted later.
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for i, day in enumerate(days):
    daily[day] = (daily.index.dayofweek == i).astype(float)

# Similarly, we might expect riders to behave differently on holidays; let's add an indicator of this as well:

from pandas.tseries.holiday import USFederalHolidayCalendar
cal = USFederalHolidayCalendar()
holidays = cal.holidays('2012', '2016')
daily = daily.join(pd.Series(1, index=holidays, name='holiday'))
# Assign back instead of chained fillna(..., inplace=True): in-place
# mutation through a column selection is deprecated in modern pandas
# and may silently operate on a copy.
daily['holiday'] = daily['holiday'].fillna(0)

# We also might suspect that the hours of daylight would affect how many people ride; let's use the standard astronomical calculation to add this information:

# +
def hours_of_daylight(date, axis=23.44, latitude=47.61):
    """Compute the hours of daylight for the given date.

    Parameters
    ----------
    date : datetime-like
        The day for which to compute daylight hours.
    axis : float, optional
        Earth's axial tilt, in degrees.
    latitude : float, optional
        Observer latitude in degrees (default: Seattle).

    Returns
    -------
    float
        Hours of daylight; the clip keeps the arccos argument in range
        at polar-day/polar-night extremes.
    """
    # pd.Timestamp replaces pd.datetime, which was deprecated and then
    # removed in pandas 2.0; the value (winter solstice 2000) is unchanged.
    days = (date - pd.Timestamp(2000, 12, 21)).days
    m = (1. - np.tan(np.radians(latitude))
         * np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
    return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.

daily['daylight_hrs'] = list(map(hours_of_daylight, daily.index))
daily[['daylight_hrs']].plot()
plt.ylim(8, 17)
# -

# We can also add the average temperature and total precipitation to the data.
# In addition to the inches of precipitation, let's add a flag that indicates whether a day is dry (has zero precipitation): # + # temperatures are in 1/10 deg C; convert to C weather['TMIN'] /= 10 weather['TMAX'] /= 10 weather['Temp (C)'] = 0.5 * (weather['TMIN'] + weather['TMAX']) # precip is in 1/10 mm; convert to inches weather['PRCP'] /= 254 weather['dry day'] = (weather['PRCP'] == 0).astype(int) daily = daily.join(weather[['PRCP', 'Temp (C)', 'dry day']]) # - # Finally, let's add a counter that increases from day 1, and measures how many years have passed. # This will let us measure any observed annual increase or decrease in daily crossings: daily['annual'] = (daily.index - daily.index[0]).days / 365. # Now our data is in order, and we can take a look at it: daily.head() # With this in place, we can choose the columns to use, and fit a linear regression model to our data. # We will set ``fit_intercept = False``, because the daily flags essentially operate as their own day-specific intercepts: # + # Drop any rows with null values daily.dropna(axis=0, how='any', inplace=True) column_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', 'holiday', 'daylight_hrs', 'PRCP', 'dry day', 'Temp (C)', 'annual'] X = daily[column_names] y = daily['Total'] model = LinearRegression(fit_intercept=False) model.fit(X, y) daily['predicted'] = model.predict(X) # - # Finally, we can compare the total and predicted bicycle traffic visually: daily[['Total', 'predicted']].plot(alpha=0.5); # It is evident that we have missed some key features, especially during the summer time. # Either our features are not complete (i.e., people decide whether to ride to work based on more than just these) or there are some nonlinear relationships that we have failed to take into account (e.g., perhaps people ride less at both high and low temperatures). 
# Nevertheless, our rough approximation is enough to give us some insights, and we can take a look at the coefficients of the linear model to estimate how much each feature contributes to the daily bicycle count: params = pd.Series(model.coef_, index=X.columns) params # These numbers are difficult to interpret without some measure of their uncertainty. # We can compute these uncertainties quickly using bootstrap resamplings of the data: from sklearn.utils import resample np.random.seed(1) err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], 0) # With these errors estimated, let's again look at the results: print(pd.DataFrame({'effect': params.round(0), 'error': err.round(0)})) # We first see that there is a relatively stable trend in the weekly baseline: there are many more riders on weekdays than on weekends and holidays. # We see that for each additional hour of daylight, 129 ± 9 more people choose to ride; a temperature increase of one degree Celsius encourages 65 ± 4 people to grab their bicycle; a dry day means an average of 548 ± 33 more riders, and each inch of precipitation means 665 ± 62 more people leave their bike at home. # Once all these effects are accounted for, we see a modest increase of 27 ± 18 new daily riders each year. # # Our model is almost certainly missing some relevant information. For example, nonlinear effects (such as effects of precipitation *and* cold temperature) and nonlinear trends within each variable (such as disinclination to ride at very cold and very hot temperatures) cannot be accounted for in this model. # Additionally, we have thrown away some of the finer-grained information (such as the difference between a rainy morning and a rainy afternoon), and we have ignored correlations between days (such as the possible effect of a rainy Tuesday on Wednesday's numbers, or the effect of an unexpected sunny day after a streak of rainy days). 
# These are all potentially interesting effects, and you now have the tools to begin exploring them if you wish! # <!--NAVIGATION--> # < [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb) | [Contents](Index.ipynb) | [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) > # testing complete; Gopal
tests/ml-books/05.06-Linear-Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # LOGISTIC REGRESSION WITH CUSTOM DATA import os import numpy as np import tensorflow as tf import matplotlib.pyplot as plt print ("Packages loaded") # # Load data # + # Load them! cwd = os.getcwd() loadpath = cwd + "/data/custom_data.npz" l = np.load(loadpath) # See what's in here print (l.files) # Parse data trainimg = l['trainimg'] trainlabel = l['trainlabel'] testimg = l['testimg'] testlabel = l['testlabel'] use_gray = l['use_gray'] ntrain = trainimg.shape[0] nclass = trainlabel.shape[1] dim = trainimg.shape[1] ntest = testimg.shape[0] print ("%d train images loaded" % (ntrain)) print ("%d test images loaded" % (ntest)) print ("%d dimensional input" % (dim)) print ("%d classes" % (nclass)) # - # # Define network # + tf.set_random_seed(0) # Parameters of Logistic Regression learning_rate = 0.001 training_epochs = 1000 batch_size = 10 display_step = 100 # Create Graph for Logistic Regression x = tf.placeholder("float", [None, dim]) y = tf.placeholder("float", [None, nclass]) W = tf.Variable(tf.zeros([dim, nclass]), name = 'weights') b = tf.Variable(tf.zeros([nclass])) # - # # Define functions WEIGHT_DECAY_FACTOR = 1 # 0.000001 l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) _pred = tf.nn.softmax(tf.matmul(x, W) + b) cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(_pred) , reduction_indices=1)) cost = cost + WEIGHT_DECAY_FACTOR*l2_loss optm = tf.train.GradientDescentOptimizer( learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1)) accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) init = tf.initialize_all_variables() print ("Functions ready") # # Optimize # Launch the graph sess = tf.Session() sess.run(init) # Training cycle for epoch in range(training_epochs): avg_cost = 0. 
num_batch = int(ntrain/batch_size) # Loop over all batches for i in range(num_batch): randidx = np.random.randint(ntrain, size=batch_size) batch_xs = trainimg[randidx, :] batch_ys = trainlabel[randidx, :] # Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost += sess.run(cost , feed_dict={x: batch_xs, y: batch_ys})/num_batch # Display logs per epoch step if epoch % display_step == 0: print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (" Training accuracy: %.3f" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (" Test accuracy: %.3f" % (test_acc)) print ("Optimization Finished!") # # CLOSE SESSION sess.close() print ("Session closed.")
notebooks/logistic_regression_customdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # %matplotlib inline from matplotlib import gridspec, cm from plot import bootstrap_samples_mean, StyleCycler from typing import List, Union import matplotlib.pyplot as plt import pandas import numpy as np import functools plt.style.use('seaborn') plt.style.use('seaborn-paper') plt.style.use('tableau-colorblind10') plt.rcParams.update({ 'figure.figsize': (2.4, 1), 'text.usetex': True, 'axes.titlesize': 6, 'font.size': 5, 'ytick.labelsize': 2, 'xtick.labelsize': 2, 'axes.labelsize': 3, 'legend.fontsize': 4, 'legend.borderpad': 0, 'lines.markersize': 1, 'lines.linewidth': 0.5, 'errorbar.capsize': 2, 'ytick.major.pad': 2, 'xtick.major.pad': 2, 'axes.labelpad': 2, 'grid.linewidth': 0.1 }) # + [markdown] tags=[] # ## Notes # # 1. Chose a set of clique size (i.e. 10 / 15 / 20 / 30 or something like that) # 2. Aggregate by added (for a missing added) or by added (by a fixed missing) # 3. 
Plot ratio and time, which are the metrics # - raw2_results = pandas.read_csv('../results/quasi2.csv', index_col=0) raw3_results = pandas.read_csv('../results/quasi3.csv', index_col=0) raw4_results = pandas.read_csv('../results/quasi4.csv', index_col=0) default_algo = dict( find2 = 'Find2', findq = 'FindQ', findqg = 'FindQ (grow)' ) def plot_results(raw: pandas.DataFrame, fix:dict, groupby: str, algorithms: dict=default_algo, xlabel: str=None, fig=None, gs=None, title:str=None, ylabel:bool=False, legend: bool=False): nsamples = 500 cycler = StyleCycler(['o', 's', 'D'], ['--', '-.', ':'], plt.rcParams['axes.prop_cycle']) if fig is None: fig = plt.figure() mask = functools.reduce(lambda a, b: a & b, [raw[k] == v for k, v in fix.items()]) masked = raw[mask] agg_mean = dict() agg_std = dict() for name, grp in masked.groupby(groupby): sample_means = pandas.DataFrame([grp.sample(frac=1, replace=True).mean() for _ in range(nsamples)]) agg_mean[name] = sample_means.mean() agg_std[name] = sample_means.std() agg_mean = pandas.DataFrame(agg_mean).T agg_std = pandas.DataFrame(agg_std).T agg_timeout = masked.groupby([groupby]).agg({f'{algo}_time': lambda x: x.isnull().sum() for algo in algorithms}).astype(int) if gs is None: grid = gridspec.GridSpec(ncols=1, nrows=3, hspace=0, figure=fig) else: grid = gridspec.GridSpecFromSubplotSpec(ncols=1, nrows=3, hspace=0.15, subplot_spec=gs) # Ratio ax_ratio = fig.add_subplot(grid[0, 0]) for (algo, label), (marker, line, color) in zip(algorithms.items(), cycler): ax_ratio.errorbar(agg_mean.index, agg_mean[f'{algo}_jaccard'], yerr=agg_std[f'{algo}_jaccard'], marker=marker, linestyle=line, label=label, c=color) if title: ax_ratio.set_title(title) # Time ax_time = fig.add_subplot(grid[1, 0], sharex=ax_ratio) for (algo, label), (marker, line, color) in zip(algorithms.items(), cycler): ax_time.errorbar(agg_mean.index, agg_mean[f'{algo}_time'], yerr=agg_std[f'{algo}_time'], marker=marker, linestyle=line, label=label, c=color) # Timeouts if 
len(agg_mean.index) > 2: bar_width = (agg_mean.index[-1] - agg_mean.index[-2])/2. else: bar_width = 0.01 ax_timeout = fig.add_subplot(grid[2, 0], sharex=ax_ratio) bottom = np.zeros(len(agg_timeout.index)) for (algo, label), (marker, line, color) in zip(algorithms.items(), cycler): ax_timeout.bar(agg_timeout.index, agg_timeout[f'{algo}_time'], bottom=bottom, width=bar_width, label=label, color=color) bottom += agg_timeout[f'{algo}_time'] if not np.any(bottom > 0): ax_timeout.set_ylim(0, 1) ax_timeout.set_yticks([0,]) if ylabel: ax_ratio.set_ylabel('Jaccard index') ax_time.set_ylabel('Time (s)') ax_timeout.set_ylabel('Timeouts') fig.align_ylabels([ax_ratio, ax_time, ax_timeout]) ax_timeout.grid(False) ax_timeout.set_xlabel(xlabel) ax_timeout.set_xticks(agg_mean.index[1::2]) ax_timeout.set_ylim(0) plt.setp(ax_ratio.get_xaxis(), visible=False) plt.setp(ax_time.get_xaxis(), visible=False) if legend: ax_timeout.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5), ncol=len(algorithms)) return fig # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace=0.2) plot_results(raw2_results, dict(missing=0.1, clique=10), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True) plot_results(raw2_results, dict(missing=0.1, clique=20), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[1], title='Clique size 20', legend=True) plot_results(raw2_results, dict(missing=0.1, clique=30), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/2hyper_beta.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace=0.2) plot_results(raw2_results, dict(added=0.0, clique=10), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True) plot_results(raw2_results, dict(added=0.0, clique=20), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[1], 
title='Clique size 20', legend=True) plot_results(raw2_results, dict(added=0.0, clique=30), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/2hyper_alpha.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace=0.2) plot_results(raw3_results, dict(missing=0.1, clique=10), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True) plot_results(raw3_results, dict(missing=0.1, clique=20), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[1], title='Clique size 20', legend=True) plot_results(raw3_results, dict(missing=0.1, clique=30), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/3hyper_beta.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace=0.2) plot_results(raw3_results, dict(added=0.0, clique=10), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True) plot_results(raw3_results, dict(added=0.0, clique=20), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[1], title='Clique size 20', legend=True) plot_results(raw3_results, dict(added=0.0, clique=30), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/3hyper_alpha.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=2, nrows=1, figure=fig, wspace=0.2) plot_results(raw4_results, dict(missing=0.1, clique=10), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True, legend=True) plot_results(raw4_results, dict(missing=0.1, clique=20), groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[1], title='Clique size 20') #plot_results(raw4_results, dict(missing=0.1, clique=30), 
groupby='added', xlabel='$\\beta$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/4hyper_beta.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # + fig = plt.figure() gs0 = gridspec.GridSpec(ncols=3, nrows=1, figure=fig, wspace=0.2) plot_results(raw4_results, dict(added=0.0, clique=10), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[0], title='Clique size 10', ylabel=True) plot_results(raw4_results, dict(added=0.0, clique=20), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[1], title='Clique size 20', legend=True) plot_results(raw4_results, dict(added=0.0, clique=30), groupby='missing', xlabel='$\\alpha$', fig=fig, gs=gs0[2], title='Clique size 30') fig.savefig('/home/aalvarez/Downloads/4hyper_alpha.eps', bbox_inches='tight', pad_inches=0.05) plt.show() # -
notebooks/quasiclique-runs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # ## Esercizi Introduttivi # # <sup><sub>Adattato da: [learnbyexample/py_regular_expressions](https://github.com/learnbyexample/py_regular_expressions/blob/master/exercises/Exercises.md)</sup></sub>. # Contiene molti altri esempi (in inglese), ben catalogati e con soluzioni. # + pycharm={"name": "#%%\n"} import re # + pycharm={"name": "#%%\n"} ''' Sostituire tutte le occorrenze di `5` con `five` ''' ip = 'They ate 5 apples and 5 oranges' pattern = r'\b5\b' res = re.sub(pattern, 'five', ip) assert res == 'They ate five apples and five oranges' # + pycharm={"name": "#%%\n"} ''' Scrivere una regex che controlla se la stinga inizia con `be` ''' line1 = 'be nice' line2 = 'oh no\nbear spotted' pattern = r'^be' pat = re.compile(pattern) assert bool(pat.search(line1)) assert not(bool(pat.search(line2))) # + pycharm={"name": "#%%\n"} ''' Dato un testo estrarre tutte le parole contenute tra le parentesi (assumi che non ci sono coppie di parentesi non chiuse) ''' text = 'another (way) to reuse (portion) matched (by) capture groups' pattern = r'\((.*?)\)' res = re.findall(pattern, text) assert res == ['way', 'portion', 'by'] # + pycharm={"name": "#%%\n"} ''' Data la sequenza in input, estrarre tutte le parole in cui è presente almeno una sequenza ripetuta. Esempio: `232323` and `897897` ''' text = '1234 2323 453545354535 9339 11 60260260' pattern = r'\b(\d+)\1+\b' pat = re.compile(pattern) res = [m[0] for m in pat.finditer(text)] assert res == ['2323', '453545354535', '11'] # + pycharm={"name": "#%%\n"} ''' Convertire le seguenti stringe in dizionari. Il nome delle chiavi per i campi sono: `name`, `maths`, `phy`. 
''' row1 = '<NAME>,75,89' row2 = 'rose, 88, 92' pattern = r'(?P<name>[^,]+),\s*(?P<maths>[^,]+),\s*(?P<phy>[^,]+)' pat = re.compile(pattern) res1 = pat.search(row1).groupdict() assert res1 == {'name': '<NAME>', 'maths': '75', 'phy': '89'}, res1 res2 = pat.search(row2).groupdict() assert res2 == {'name': 'rose', 'maths': '88', 'phy': '92'}, res2 # -
AnalisiTesto/Lezione1/Esercizi/simple_regex.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # An analysis of the State of the Union speeches # # **Authors: <NAME>, <NAME>, <NAME>** # ## Abstract # The following paper explores and analyzes the text of US State of the Union Addresses from 1790-Present. After some basic exploratory analysis of the metadata, we use NLTK to compute general text analysis metrics, create a term-document matrix for direct corpus analysis, present scatterplots of the speeches using Multi-dimensional scaling techniques to collapse the high dimensional data onto two dimensions, and then compute a variety of complexity metrics to investigate the changes in complexity to US State of the Union Addresses over time. # ## Introduction # A common criticism of modern American politics is that it has gotten dumbed down over time. The level of discourse of our modern politics is said to pale in comparison to the discourse of such intellectual giants of the past, from <NAME> to FDR. We sought to examine whether such criticisms are valid, or are merely the results of malcontents looking at the past with rose-tinted glasses. By evaluating State of the Union speech complexity using a variety of metrics, we found that the complexity of State of the Union speeches show a strong trend toward decreasing complexity over time. # # ## Part 1: # To begin, we represented each speech in the dataset as a row in a Pandas dataframe, with information such as the date that we use to explore our given dataset. 
Using the date, we computed the numbers of speeches given in each month: # <img src="fig/addresses_month.png"> # Further exploration showed us a gap in the number of speeches, attributed to the dataset missing data for speeches made by Grover Cleveland in his second term between 1892 and 1897: # <img src="fig/timeline.png"> # Then, we represented the speeches as a list with each element being the raw text of each speech directly from the dataset. # # # ## Part 2: # In part 2, we used NLTK to compute certain text analysis metrics on the speeches through tokenizing the speech words, such as the number of non-unique words, unique words, non-unique stemmed words, unique stemmed words, characters, and sentences. In our analysis, we decided to include numbers as well as letters since we deemed them to be a part of the speech. However, we filtered out elements of the speech we felt were irrelevant to the analysis such as the most common english stop words, punctuation, and only kept the stem of words as dictated by the SnowballStemmer algorithm. We used this stemmer because it produced us figures that match the data given by the guideposts, and further research indicated that this stemmer algorithm is well-established for text analysis. # # We then created graphs to visualize the changes in these metrics over time: # <img src="fig/speech_changes.png"> # # We also represented the above charts as violin plots by president, to help discern and partition the data as discrete elements rather than as a time function: # # <img src="fig/speech_characteristics.png"> # ## Part 3: # During part 3 of the project, we have loaded the data from part 2 and save the variables as speech_word and speeches_cleaned. In the first cell, we use for loop to go through the speeches_cleaned line-by-line to get all the unique words and set all unique words into a new variable called unique_word. 
For the future use, we used sorted() function to make our unique_word variable contains the numbers of unique words for each president in order. The results gave us 18797 unique words in total. To create a function called word_vector(doc, vocab), we use two for loops to add each word and the number of it appears into one matrix. For stem words in speeches_cleaned, we append the speech(number of the word appears) and the unique word into a vector and create a matrix. Lastly, we transformed the matrix into data frame using pd.DataFrame and np.array functions, and take the transpose to make “words” as rows, and count as “columns”. Now the wmat matrix contains the number of counts for each word in the entire document. # For the last calculation part, we found out number of words that has 0 in wmat, and take the summation of number of zeros for each president and take summation again for each row. This gave us the total number of zeros in the whole matrix, and divided by the total number of entries. Total number of entries can be calculated as the number of rows multiple the number of columns. Shape.[0] and shape.[1]. # # Finally we got 93.15% zeros amount all the data, which indicated that a huge amount of words are not been said for different presidents. President in each term had their own way of giving speeches, and it was not too common to see a lot of repeating in the words they said. That is the reason more than 93% of the wmat entries are zeros. # ## Part 4: # Using the Term-Document matrix calculated in part 3, we normalize the word counts to convert the counts into probability distributions, for both the speeches grouped by president and for each speech individually. We then perform multidimensional scaling on the count probabilities via manifold learning, with a stress majorization optimization strategy, using euclidean distances and Jensen-Shannon Divergence as our difference metrics. 
# # The scatter plots of the word-document and president-aggregated word-document after being projected onto the planes of their stress majorizing axes, are as follows: # # <img src="fig/mds_naive.png"> # # <img src="fig/mds_naive_all.png"> # # <img src="fig/mds_jsdiv.png"> # # <img src="fig/mds_jdsiv_all.png"> # # # # ## Part 5: # We used the following metrics as measures of complexity: # 1. Words Per Sentence # 2. Average Word Length # 3. Flesch-Kincaid Readability Score (details below) # 4. Flesch-Kincaid Grade Level (details below) # 5. Proportion of words in most common 7500 words # # The Flesch-Kincaid Readability Score is calculated by # # 206.835 - 1.015(words/sentence) - 84.6(syllables/word) # # With the readablity scores being interpreted as follows: # # <img src="fig/fk_table.png"> # # The Flesch-Kincaid Grade Level is calculated by # # 0.39(words/sentence) + 11.8(syllables/word) - 15.59 # # With the output value corresponding to the reading level of the US Grade. # # For the proportion of words in the most common 7500 words, we used data from https://github.com/first20hours/google-10000-english, which contains the 10000 most common English language words as found by Google's Trillion Word Corpus. # # <img src="fig/words_per_sentence.png"> # # <img src="fig/Average_Word_Length.png"> # # <img src="fig/Readability_Score.png"> # # <img src="fig/Grade_Level.png"> # # <img src="fig/Percent_Most_Common.png"> # # <img src="fig/Readability_vs_PCW.png"> # # <img src="fig/WPS_vs_AWL.png"> # # From the figures, it is clear that over time, the complexity of the US State of the Union speeches has declines. The number of words per sentence, the average word length, and the grade level all have a decreasing trend, while the readability scores and the percent of words in the speech that are among the top 7500 most commonly used in the English language, all increase.
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZpZmOPeluzkP" # # <center> **Hoheneder Final Term Project:** # # ## <center> **Spatial Evaluation of the Cheat River Watershed Hydrograph Response to a Large Storm Event** # # <center> Study Site: Cheat River Watershed in North-Central, West Virginia, USA # # ESCI895: Hydrologic Data Analysis, Fall 2021 # # University of New Hampshire # + id="mIlPqNCAuvl1" #%% Description of Dataset and Purpose of Code: #Description: # Project File Taking Numerous Stream Gauges for a Given Watershed Area and Evaluating How That Pulse of Water #Moves Through The Given Watershed for a Single Storm Event #Major Products Inlcude: Time Series Plots, Z-Score Values of Discharge, Basic Stats of Discharge Comparison #Major Conclusion: The Storm Event Factors More on the Path of the Storm than the Site Hydrologic Properties # + id="DQzDOwEYvRCa" #Install Libraries for Project: import pandas as pd import datetime from pprint import pprint import numpy as np from matplotlib import pyplot as plt # + [markdown] id="CjcEQr9qwQ9b" # # <center> **Introduction** # # **Central Project Goals:** # # * How does peak discharge temporally vary within a watershed for a single large-scale flooding event? # * How does discharge move thorugh a watershed during a storm event? # * Is it possible to track th location of a storm within a watershed based upon the hydrograph? # * What insight does monitoring discharge provide those seeking to mitigate the damage of flooding in the future? # # . # # **Introducion to Project:** # # This project seeks to address how water specifically moved through the Cheat River watershed during a large-sclae flooding event in 2019. The magnitude of this flooding event slighlty compares to the largest flooding events for the watershed which took place in 1985. 
**The Election Day Floods of 1985** caused damages in excess of **$700M** to West Virginia which was found “woefully underprepared” for floods of such magnitude (Luke, 1988, “Response to West Virginia’s...”). Evaluating how large discharge events move throughout a watershed temporally and in magnitude can provide insight into what steps can be taken to mitigate damage and harm to communities and their residents, as well as developing a hydrologic comprehension of how water is temporally and geographically intertwined to a watershed area. In the context of West Virginia, centering hydrologic research upon the Cheat River basin seeks to evaluate and defend an **understudied** and **historically disadvantaged** region of the United States in disaster preparedness. # # # Discharge data for the 2019 event will be employed in lieu of the 1985 Election Day Floods due to scant data coverage for the 1985 event. While suitable coverage can be found for the USGS network during 2019, the same cannot be said for 1985 where only two operational discharge gauges were located within the watershed at the time of the storm event. Hence, a proxy for the catastrophic 1985 flooding events will be replaced with the 2019 highly-covered flooding event. # # . # # **Impact of Study:** # # In the context of West Virginia, centering hydrologic research upon the Cheat River basin seeks to evaluate and defend an **understudied** and **historically disadvantaged** region of the United States in disaster preparedness. This study aims to not only produce information that is useful in a global hydrologic sense where it can be applied to numerous watersheds and storm events, but especially looks towards areas where natural disaster resource allocation and preparedness is lacking. 
# # # # # + id="hro-oRaYwZ0G" #%% Importing Raw Datafiles: #Importing Data Files: albright_file= 'Albright.txt' blackwater_file= 'Blackwater.txt' bowden_file= 'Bowden.txt' hendricks_file = 'Hendricks.txt' parsons_file= 'Parsons.txt' rockville_file= 'Rockville.txt' # + [markdown] id="FfM3HUBswoou" # # <center> **The Cheat River Watershed of North Central West Virginia:** # # # **The Cheat River Watershed:** # # The Cheat River watershed is a 3682.9km2 catchment located entirely within **North-Central West Virginia** where the Cheat River proper flows 135km northward from the confluence of Shavers Fork and the Blackwater River to eventually terminate at Point Marion, PA, as the larger Monongahela River as a portion of the greater Ohio River network. # # . # # **Major Tributaries of the Cheat River Watershed:** # * Shavers Fork # * Blackwater River # * Big Sandy Creek # * Dry Fork # * Dry Run # * Glady Fork # * Laurel Fork # # . # # **Geography of the Cheat River Watershed:** # # The Cheat River watershed is dominated by deciduous forest land cover, where the National Land Cover Dataset (NLCD) identifies over 85% of the watershed area as **deciduous forest** land cover. # # The watershed area is bordered by the **Appalachian Front** physiographic boundary to the east resulting in larger quantities of precipitation in the catchment with an average of 101.59 mm/yr with a maximum of 149.28 mm/yr in the southern extreme of the watershed adjacent to **Cheat Mountain**, from which the watershed receives its name. # # Given the physiographic boundaries of the watershed, elevation also dramatically varies with the eastern border of the watershed composed of the **Appalachian Front** terminus of the **Valley and Ridge** physiographic province, gradually losing elevation westward into the **Appalachian High Plateaus** towards the **Ohio River Valley**. 
# + id="exw3kUATwp2p"
#%% Importing DataFrames:

#All six NWIS exports share the same layout, so one loader replaces six
#copies of the read/drop/rename/interpolate boilerplate.
def _load_gauge(filename, extra_drop=()):
    """Read one USGS NWIS tab-delimited export into a tidy DataFrame.

    Parameters
    ----------
    filename : str
        Path to the NWIS export; '#' lines are metadata comments and
        sentinel values (+/-9999, +/-8888) are treated as missing.
    extra_drop : tuple of str, optional
        Bookkeeping columns present only in some exports
        (e.g. '10s.1' in the Albright and Parsons files).

    Returns
    -------
    pandas.DataFrame
        Indexed by timestamp (the '20d' column), with 'Discharge (cfs)'
        and 'Stage (ft)' columns; gaps are linearly interpolated.
    """
    df = pd.read_table(filename, delimiter="\t", comment='#', header=1,
                       parse_dates=['20d'], index_col=['20d'],
                       na_values=[9999, -9999, 8888, -8888])
    #Drop NWIS bookkeeping columns (plus any station-specific extras):
    df = df.drop(columns={"5s", "15s", "6s", "10s", *extra_drop})
    #Rename the cryptic NWIS data columns:
    df = df.rename(columns={"14n": "Discharge (cfs)", "14n.1": "Stage (ft)"})
    #Fill NaN Data:
    df.interpolate(method='linear', inplace=True)
    return df

#Albright (export carries an extra '10s.1' column):
df_albright = _load_gauge(albright_file, extra_drop=("10s.1",))
#Blackwater:
df_blackwater = _load_gauge(blackwater_file)
#Bowden:
df_bowden = _load_gauge(bowden_file)
#Hendricks:
df_hendricks = _load_gauge(hendricks_file)
#Parsons (export carries an extra '10s.1' column):
df_parsons = _load_gauge(parsons_file, extra_drop=("10s.1",))
#Rockville:
df_rockville = _load_gauge(rockville_file)

# + [markdown] id="xj-Q3eT1w6Lj"
# ## <center> **Establishing the Study Sites Within the Cheat River Watershed:**
#
# This study employs a series of six streamflow gauges operated by the **United States Geological Survey (USGS)** to evaluate the properties of 15-min intervals of streamflow discharge, in cubic feet per second moving throughout the Cheat River watershed of a specific large storm event in late-June 2019. This storm event was the single largest storm event for the Cheat River Watershed in terms of discharge during the 2019 calendar year.
# # **Streamflow Gauges of Study:**
# * Dry Fork at Hendricks, WV (USGS: 03065000)
# * Blackwater River at Davis, WV (USGS: 03066000)
# * Shavers Fork below Bowden, WV (USGS: 03068800)
# * Cheat River near Parsons, WV (USGS: 03069500)
# * Cheat River at Albright, WV (USGS: 03070260)
# * Big Sandy Creek at Rockville, WV (USGS: 03070500)
#
#
# All streamflow gauges within the extent of the Cheat River HUC-8 watershed were selected for this study and were found to be operational for the full extent of the storm event. All data was gathered from the USGS as a portion of the NWIS hydrologic database. As depicted above, the streamflow gauges within the Cheat River watershed are distributed across all of the major tributaries so that no major tributary or another source of channelized hydrologic contribution to the Cheat River hydrologic network is missing from the storm event analysis.

# + id="9-CQzJOuwBxK"
#%% Create List of DataFrames and Constants for Functions and Looping:

#Create List of Gauge Station DataFrames:
df_list= [df_albright, df_blackwater, df_bowden, df_hendricks, df_parsons, df_rockville]

#Define Watershed Area:
watershed_area= 1422 #sq-mi

#Define Start and End Dates--Initial Load Includes a Second Storm I Want to Avoid:
starting_date= pd.to_datetime('2019-06-28 00:00:00')
ending_date= pd.to_datetime('2019-07-05 00:00:00')

# + id="5_Kbyl32w-4R"
#%% Trim DataFrames to Desired Lengths:

#Define Function for Trimming DataFrames:
def time_trim(df):
    """Return *df* restricted to the storm-event window.

    Slices by the module-level ``starting_date``/``ending_date`` bounds.
    The trimmed frame is *returned*, not mutated in place, so callers
    must bind the result.
    """
    return df[starting_date:ending_date]

#BUGFIX: the original loop called time_trim(item) and discarded the
#return value (rebinding the local ``df`` cannot mutate the caller's
#frame), so nothing was actually trimmed. Binding the returns fixes it:
df_albright_trim= time_trim(df_albright)
df_blackwater_trim= time_trim(df_blackwater)
df_bowden_trim= time_trim(df_bowden)
df_hendricks_trim= time_trim(df_hendricks)
df_parsons_trim= time_trim(df_parsons)
df_rockville_trim= time_trim(df_rockville)

#Create df_trim List:
df_trim_list = [df_albright_trim, df_blackwater_trim, df_bowden_trim, df_hendricks_trim, df_parsons_trim, df_rockville_trim]

#Text Output Statement to Let Me Know This Ran:
print('')
print('DataFrames Trimmed')
print('')

# + [markdown] id="tCORhimMxGsj"
# # <center> **Methodology:**
#
# The methodology of this study is based upon evaluating how similar or dissimilar numerous discharge curves are to one another over the course of a single significant storm event.
#
# The means of determining how event flow properties vary not only in quantity, but temporally across an array of USGS gauges can be determined in two major steps: **baseflow-event flow hydrograph separation** and a **curve similarity approximation**. Both of these steps can be completed in the realm of time-series analysis, where hydrograph separation serves as a vehicle to identify the means of comparison, and a curve similarity approximation serves as the method of comparison and analysis. From these steps, other interrelated products, such as spatial interpolations of event flow properties can be produced across the watershed area extent.
#
# .
#
# **Major Methodological Processes:**
# * Visualization of Discharge Curves for the Storm Event
# * Hydrograph Separation of Baseflow from Event Flows
# * Pearson Coefficient Generation for Curve Similarity Analysis
#
# .
#
# All data for this was completed utilizing the Python coding software suite, and all analysis was conducted using the associated Python coding packages of **NumPy** and **Pandas**.
Data downloaded from the USGS NWIS hydrologic database was used to retrieve 15-min discharge data for all six gauges from the Cheat River watershed into a single conglomerated timeseries. This single timeseries was converted to a single Pandas dataframe per stream gauge location for data manipulation purposes.
#
# Within this dataframe, the index included the date and time of each discharge measurement and each column specified the discharge measurement, in cubic feet per second (cfs) of each of the six USGS gauge locations. Following entry into the Python platform, equivalent values of discharge were found for each measurement in terms of discharge in cm/hr as a product of the total watershed extent and a normalized z-score value to identify the degree of variance from normal of each discharge value.
#
#

# + id="ACK0aZbLxbZW"
#%% DataFrame Operations:

#Define Function for Z-Score Calculations:
def zscore_Q(df):
    """Add a 'Z-Score Q' column: standardized 'Discharge (cfs)'.

    Mutates *df* in place; z-score uses the frame's own mean/std.
    """
    #Calculate Z-Score for DataFrame:
    df['Z-Score Q']= (df['Discharge (cfs)'] - df['Discharge (cfs)'].mean()) / df['Discharge (cfs)'].std()

#Define Function for Discharge Equivalence in cm/hr:
def discharge_cmhr(df):
    """Add a 'Discharge (cm/hr)' column: area-normalized runoff depth.

    Conversion: cfs / (watershed_area mi^2 * 5280^2 ft^2/mi^2) = ft/s,
    then * 30.48 cm/ft * 3600 s/hr = cm/hr. Mutates *df* in place and
    reads the module-level ``watershed_area`` (sq-mi).
    """
    #Calculate Discharge in cm/hr:
    df['Discharge (cm/hr)']= (df['Discharge (cfs)']/watershed_area * (1/5280**2) * 30.48 * 3600)

#For Loop to Calculate Z-Score for Each DataFrame:
for item in df_list:
    #Iterate For Loop to Calculate Z-Scores:
    zscore_Q(item)
    #Iterate For Loop to Calculate Discharge Equivalence:
    discharge_cmhr(item)

#Same derived columns for the storm-window (trimmed) frames:
for item in df_trim_list:
    #Iterate For Loop to Calculate Z-Scores:
    zscore_Q(item)
    #Iterate For Loop to Calculate Discharge Equivalence:
    discharge_cmhr(item)

#Output Text Statement:
print('Calculated Z-Scores for DataFrames')
print('')
print('Discharge Equivalence in cm/hr Calculated for Each DataFrame')
print('')

# + [markdown] id="CzZxcm92xez2"
# ## <center> **Plotting Discharge Time Series**
#
# Generating time series to visualize how discharge progressed for the storm event across each of the six measurement gauges.
Plots were generated as time series for dependent values of discharge. Plotting these raw time series allows for a visualization of the discharge curves preceding any analysis of their timing and curve geometry.
#
# **Time Series Iterations:**
# * Discharge (cm/hr)
# * Discharge (Z-Score)
#
# As this study exclusively focuses upon the 2019 Cheat River flooding event, the temporal period of observation was limited to **3 days (June 27th, 2019)** preceding the storm event, throughout the duration of the storm event, and terminating **7 days (July 7th, 2019)** following the end of precipitation. This period was selected as it allowed for the most cohesive estimation of baseflow conditions, as a relatively normal rate of baseflow without the significant influence of precipitation could be determined.
#
# The major visual product of this step is the time series displaying the rise and fall of the hydrograph over the duration of the period of study, representing the movement of the storm system through the watershed as a hydrologic pulse. Each line in the time series plot represents a different USGS streamflow gauge such that the magnitude of flow can be visually inferred. Given that hydrologic discharge data does not typically display any significant seasonality trends in the roughly two-week period this study observes, no further time series manipulation or extraction of residual values was seen as necessary.
# + id="nnqTz2fHxj5b"
#%% Plotting Initial Time Series Curves Over Full Duration:

#Shared (frame, colour, label) styling so both figures stay consistent.
#The Blackwater River gauge is labelled by its town, Davis, matching the
#naming used in the Pearson-coefficient section later on (the original
#z-score figure inconsistently labelled it 'Blackwater').
_station_styles = [
    (df_albright_trim, 'navy', 'Albright'),
    (df_blackwater_trim, 'grey', 'Davis'),
    (df_bowden_trim, 'dodgerblue', 'Bowden'),
    (df_hendricks_trim, 'maroon', 'Hendricks'),
    (df_parsons_trim, 'orange', 'Parsons'),
    (df_rockville_trim, 'darkgreen', 'Rockville'),
]

#Create Plotting Area:
fig, ax1 = plt.subplots()
#Plot Discharge Data for every gauge:
for _df, _color, _label in _station_styles:
    ax1.plot(_df['Discharge (cm/hr)'], ',', linestyle='-', color=_color, label=_label)
#Axis Formatting:
ax1.set_ylim(bottom = 0)
ax1.set_xlim(df_albright_trim.index[0], df_albright_trim.index[-1])
fig.autofmt_xdate()
#Axis Labels:
ax1.set_ylabel('Discharge (cm/hr)', color='k', fontweight="bold", fontsize= 12)
ax1.set_xlabel('Date', color='k', fontweight="bold", fontsize= 12)
fig.suptitle('Discharge Curves for Cheat River Watershed', fontweight= "bold", fontsize=18)
#Legend:
fig.legend(bbox_to_anchor= (1.15, 0.75))

# + id="lzKwSJm4xn44"
#%% Plotting Initial Time Series Curve Z-Scores Over Full Duration:

#Create Plotting Area:
fig, ax1 = plt.subplots()
#Plot Z-Scored Discharge Data for every gauge:
for _df, _color, _label in _station_styles:
    ax1.plot(_df['Z-Score Q'], ',', linestyle='-', color=_color, label=_label)
#Axis Formatting (z-scores can be negative, so no y-floor here):
ax1.set_xlim(df_albright_trim.index[0], df_albright_trim.index[-1])
fig.autofmt_xdate()
#Axis Labels -- BUGFIX: this figure plots z-scores, not cm/hr:
ax1.set_ylabel('Discharge (Z-Score)', color='k', fontweight="bold", fontsize= 12)
ax1.set_xlabel('Date', color='k', fontweight="bold", fontsize= 12)
fig.suptitle('Z-Scored Discharge Curves for Cheat River Watershed', fontweight= "bold", fontsize=18)
#Legend:
fig.legend(bbox_to_anchor= (1.15, 0.75))

# + [markdown] id="eUFefGnextuU"
# ## <center> **Hydrograph Separation of Discharge Curves:**
#
# This first major step of this study's methodology is the separation of raw discharge values into subsequent values of **baseflow** and **event flow**. The separation of hydrograph values is a critical step as it allows for the evaluation of the watershed's response to new, input volumes of water in its system (Blume et al., 2007). The watershed's response provides insight regarding how affected a given hydrologic channel is based upon the magnitude of water input to the water body.
#
# * **Baseflow:** The sustaining flow of a river system; the approximated discharge of the river system independent of additional hydrological inputs
#
# * **Event Flow:** Discharge quantity above the base flow discharge. Effectively, the response of the hydrograph to a particular storm event in terms of discharge
#
#
# For the purposes of this study, separating values into baseflow and event flow discharge allows for the evaluation of when peak discharge arrived, how much total discharge was associated with the storm event, and how those values compare temporally and in magnitude across the six gauges in the Cheat River watershed. All values were normalized into a z-score value. Normalization of discharge values to a z-score value ensures that the order of magnitude across various streamflow gauges is being represented in a common scale.
While all streamflow gauges inherently will increase with any given amount of positive net water input into the system, morphological factors such as channel depth, width, and other considerations all greatly affect how flow moves through the channel at the point of measurement (Crinklaw, 2018). Normalization to a z-score largely bypasses fluvial morphological factors where the dependent discharge is not only common across all stream gauges, but is more representative of the specific channel conditions than a value of raw discharge.
#

# + id="BLT9v2P_xxZf"
#%% Hydrograph Separation Function:

#Define Function:
def hydrograph_sep(totalq, watershed):
    """Split an observed hydrograph into baseflow and event flow.

    Baseflow is approximated with two straight-line segments fitted in
    epoch-seconds time: a pre-event trend extended from the antecedent
    point to the peak, and a line from the peak down to the discharge at
    the estimated end of the event. Mutates *totalq* (adds 'Diff' and
    'BaseQ (cm/hr)' columns) and sets the globals ``antQ_date``,
    ``end_of_event`` and ``baseQ`` used by later cells.

    Parameters: totalq -- DataFrame with a datetime index and a
    'Discharge (cm/hr)' column; watershed -- drainage area (sq-mi).
    Returns (baseQ, antQ_date, antQ_val, peakQ_date, peakQ,
    end_of_event, end_Q).
    """
    #Find totalq:
    totalq['Diff'] = totalq['Discharge (cm/hr)'].diff()
    #Find Antecedent Discharge and Date using 0.000104 Threshold:
    #(first 15-min rise exceeding the threshold marks event onset --
    # threshold value appears empirically chosen; TODO confirm units)
    global antQ_date
    antQ = (totalq.loc[totalq['Diff'] > 0.000104, 'Discharge (cm/hr)'])
    antQ_date = antQ.index[0]
    antQ_val = round(antQ[0], 3)
    #Find Peak Discharge Date:
    peakQ_date = totalq['Discharge (cm/hr)'].idxmax()
    peakQ = totalq['Discharge (cm/hr)'].max()
    #Calculate Event Duration:
    #N days after peak; empirical recession formula N = 0.82 * A^0.2
    #(A in km^2 presumably, given the 1e-6 factor -- TODO confirm)
    N = 0.82*(watershed*1e-6)**0.2
    #Calculate End of Event:
    global end_of_event
    end_of_event = peakQ_date + datetime.timedelta(days = N)
    #Calculate Ending Discharge Value:
    #NOTE(review): Index.get_loc(method='nearest') is deprecated and
    #removed in pandas 2.0 -- get_indexer would be the replacement.
    end_Q = totalq.iloc[totalq.index.get_loc(end_of_event,method='nearest'), totalq.columns.get_loc('Discharge (cm/hr)')]
    #Create baseQ Dataframe:
    global baseQ
    baseQ = totalq[['Discharge (cm/hr)']].copy()
    #Calculate Base Discharge Curve Before Peak:
    #Linear fit of the pre-event record, with timestamps converted to
    #epoch seconds (int64 ns / 1e9) for a numeric x-axis:
    slope1, intercept1= np.polyfit(totalq.loc[totalq.index < antQ_date].index.astype('int64') /1E9, totalq.loc[totalq.index < antQ_date, 'Discharge (cm/hr)'], 1)
    #Append Data Before Peak:
    baseQ.loc[antQ_date:peakQ_date,"Discharge (cm/hr)"] = slope1 * (totalq.loc[antQ_date:peakQ_date].index.view('int64')/1e9) + intercept1
    #Calculate Base Discharge Curve After Peak:
    #Straight line from the baseflow value at the peak to end_Q:
    slope2, intercept2= np.polyfit([peakQ_date.timestamp(), end_of_event.timestamp()], [baseQ.loc[peakQ_date, 'Discharge (cm/hr)'], end_Q], 1)
    #Append Data After Peak:
    baseQ.loc[peakQ_date:end_of_event,"Discharge (cm/hr)"] = slope2 * (totalq.loc[peakQ_date:end_of_event].index.view('int64')/1e9) + intercept2
    #Append BaseQ Values to DataFrame:
    totalq['BaseQ (cm/hr)'] = baseQ['Discharge (cm/hr)']
    #Return Variables:
    return (baseQ, antQ_date, antQ_val, peakQ_date, peakQ, end_of_event, end_Q)

# + id="8Wlt3gMXx3Cg"
#%% Modified Time Series Plotting Containing Baseflow:

#Define Function with Keyword Argument for Baseflow:
def timeseriesplot(df1, startdate, enddate, baseflow= None):
    """Plot one gauge's discharge series, optionally with its baseflow.

    df1 -- DataFrame with a 'Discharge (cm/hr)' column; startdate /
    enddate bound the x-axis; baseflow -- optional DataFrame with the
    same column, drawn as a second line when supplied.
    """
    #Create Plot Area:
    fig, ax1 = plt.subplots()
    #Plot Discharge Data:
    ax1.plot(df1['Discharge (cm/hr)'], ',', linestyle='-', color='navy', label='Discharge (cm/hr)')
    #Axis Formatting:
    ax1.set_ylim(bottom = 0)
    ax1.set_xlim(startdate, enddate)
    fig.autofmt_xdate()
    #Axis Labels:
    ax1.set_ylabel('Discharge (cm/hr)', color='k', fontweight="bold", fontsize= 12)
    ax1.set_xlabel('Date', color='k', fontweight="bold", fontsize= 12)
    #Optional Argument Boolean Indicator:
    if baseflow is not None:
        ax1.plot(baseflow['Discharge (cm/hr)'], ',', linestyle='-', color='darkseagreen', label=' Baseflow (cm/hr)')
    #Legend:
    fig.legend(bbox_to_anchor= (0.65, 0.0))

# + [markdown] id="xy3eozcIx_0m"
# ## <center> **Creating Values of Event Discharge**
#
# To determine the volume of discharge across the storm event, the baseflow discharge value was subtracted from the event flow value. In this sense, a value of event flow is determined for the system that rises above the typical, antecedent, baseflow discharge. This is the movement of excess water that is moving through the system at a given time, and creates the “pulse” of the storm event through the Cheat River watershed.
#
# * **$Q_{Event} = Q_{Observed} - Q_{Base}$**
#
# Relatedly, once this effective flow value is calculated, to determine the total volume of effective flow discharge of the storm event, the effective flow discharge curve was integrated over the duration of the storm event.
This provides a volumetric measurement of how much water moved through the point of measurement throughout the storm event.

# + id="izznIJuVyGtW"
#%% Determine Effective Flow:

#Define Function:
def effect_flow(df):
    """Add an 'Eff Flow (cm/hr)' column: observed minus baseflow, floored at 0.

    Mutates *df* in place; also clips any negative 'BaseQ (cm/hr)'
    values to zero first so the subtraction cannot inflate event flow.
    """
    #Calculate Effective Flow:
    #Ensure All Values of Event Flow are Positive:
    df['BaseQ (cm/hr)']= np.where(df['BaseQ (cm/hr)'] > 0, df['BaseQ (cm/hr)'], 0)
    #Redefine Values of Event Flow Equal to Discharge as 0:
    df['Eff Flow (cm/hr)']= np.where(df['Discharge (cm/hr)'] - df['BaseQ (cm/hr)'] > 0, df['Discharge (cm/hr)'] - df['BaseQ (cm/hr)'], 0)

#Create For Loop to Run Function for Each DataFrame:
for item in df_list:
    #Run Event Flow-Effect Flow Function:
    effect_flow(item)

#Output Text Statement to Confirm For Loop:
print('')
print('Event Flow Calculated for Each DataFrame')
print('')

# + id="79OcHG8qyS-L"
#%% Calculate Z-Score for Effective Flow:

#Define Function:
def zscore_eventflow(df):
    """Add a 'Z-Score EffQ' column: standardized 'Eff Flow (cm/hr)'.

    Mutates *df* in place; z-score uses the frame's own mean/std.
    """
    #Create Z-Score for Event Flow:
    df['Z-Score EffQ']= (df['Eff Flow (cm/hr)'] - df['Eff Flow (cm/hr)'].mean()) / df['Eff Flow (cm/hr)'].std()

#For Loop to Iterate Through:
for item in df_list:
    #Iterate Through Function:
    zscore_eventflow(item)

#Let Me Know This Ran:
print('Solved Z-Score for Event Flows')
print('')

# + [markdown] id="Y-5PpjYqy27R"
# ## <center> **Calculating Pearson Coefficient Values for Discharge Curve Correlation**
#
# The last step of this study is to determine how similar or different the various discharge curves are to each other. A measure of similarity of discharge profiles will provide the information we are seeking regarding determining how a storm event moves throughout a watershed.
# # # A Pearson correlation will generate a coefficient value between -1 and 1 that indicates whether the time series being evaluated are:
#
# * **Positively Correlated (1)**
# * **Not Correlated (0)**
# * **Negatively Correlated (-1)**
#
#
# Given the Pearson coefficient is a global rating of synchrony between multiple datasets, an assumption is made to neglect small, spatial phenomena that influence discharge, such as fluctuating temperatures above freezing. It is expected the effects of these phenomena, if present, are temporary and minute enough that they will not affect the global synchrony and correlation of any of the time series.

# + id="bri_g8gvyzSW"
#%% Pearson Coefficient Calculation for Time Series:
import itertools

#Ordered (label, frame) pairs. itertools.combinations preserves this
#order, so the coefficients line up with the labels built alongside them
#(replacing 15 copy-pasted .corr() statements; this also fixes the
#'Albrihgt-Bowden' typo the hand-written label list carried).
_gauges = [('Albright', df_albright), ('Davis', df_blackwater), ('Bowden', df_bowden), ('Hendricks', df_hendricks), ('Parsons', df_parsons), ('Rockville', df_rockville)]

#Pairwise Pearson correlation of discharge for every unique two-gauge
#combination (15 pairs from 6 gauges):
pearson_array= []
ref_list = []
for (_name_a, _df_a), (_name_b, _df_b) in itertools.combinations(_gauges, 2):
    pearson_array.append(_df_a['Discharge (cm/hr)'].corr(_df_b['Discharge (cm/hr)']))
    ref_list.append(_name_a + '-' + _name_b)

#Keep the individually named coefficients for backward compatibility
#with any later cell that references them directly:
(AlbrightDavisQ, AlbrightBowdenQ, AlbrightHendricksQ, AlbrightParsonsQ,
 AlbrightRockvilleQ, DavisBowdenQ, DavisHendricksQ, DavisParsonsQ,
 DavisRockvilleQ, BowdenHendricksQ, BowdenParsonsQ, BowdenRockvilleQ,
 HendricksParsonsQ, HendricksRockvilleQ, ParsonsRockvilleQ) = pearson_array

# + id="rLVp5fZW2C2X"
#%% Creating Small DataFrame for Pearson Values:

# Calling DataFrame constructor after Zipping:
pearsondf = pd.DataFrame(list(zip(ref_list, pearson_array)), columns =['Gauge Combination', 'Pearson Coefficient Value'])

# + [markdown] id="uPklKjruyb7X"
# # <center> **Results of the Study:**
#
# **Major Results
Products:**
# * Time Series of Discharge Isolating Each Location Displaying Baseflow and Event Flow Discharge
# * Conglomerated Time Series of Event Flow Discharges by Location in terms of cm/hr and Z-Scores
# * Bar Graph Comparing the Total Discharge Volume by Location for the Storm Event
# * Table of Basic Statistics of Event Flow by Location
# * Table and Bar Graph of Pearson Coefficient Values by Coupled Stream Gauge Location Combination
#
#

# + id="lwNetvnCyi5Z"
# First Cell to Drive Results

# + [markdown] id="Xtdmz7ulzEv_"
# ## <center> **Hydrograph Separation Function to Visualize and Quantify Baseflow**
#
# Generation of **Time Series Curves** for each study site location in terms of cm/hr discharge for the separate **baseflow** and **observed discharge** curves. A line of event flow is not included on these plots, but is calculated in the background of the below functions. As previously mentioned, baseflow can never exceed the observed discharge, so for periods of time not identified with the storm event, baseflow was assumed to be equal to the observed discharge. This was done to avoid needing to extract the influence of previous storm events or other influences onto the rising limb discharge curve.
#
# **What Does This Show?:**
# * What Are the Baseflow and Event Flow Discharge Values for a Given Site?
# * How Does Baseflow and Event Flow Vary in Magnitude Over Time?
# * How Does the Baseflow Relate Itself to the Event Flow for a Specific Site?
# + id="kajAE_SjzTSb" #%% Running Functions per Watershed: #Create Empty Array for Storm Totals: storm_totals = [] #Define Function for Running Functions per Watershed: def watershed_function(df): #Run Hydrograph Seperation Function: (baseQ, antQ_date, antQ_val, peakQ_date, peakQ, end_of_event, end_Q) = hydrograph_sep(df, watershed_area) #Integrating Storm Total: storm_frame= df[antQ_date : end_of_event] discharge_total= storm_frame['Discharge (cm/hr)'].sum() storm_totals.append(discharge_total) #Run Time Series Plotting Function: timeseriesplot(df, df.index[0], df.index[-1], baseQ) #For Loop to Iterate Through Locations: for item in df_list: watershed_function(item) #Output Text Statement to Let Me Know This Ran: print('') print('Congrats, You Probably Have Some Graphs Now...') print('') # + [markdown] id="mMsT-tqPzUUA" # ## <center> **Visualziation of Event Flow Discharge Curves** # # Generation of Time Series Curves for Each Location in terms of cm/hr discharge and Z-Score values. These plots dispaly the result of the previous iterative plots of observed discharge and baseflow. Such, this is the subtractive product of those individual plots. As previously mentioned, baseflow was assumed to be equal to observed flow for periods outside of the storm event duration to not require to analyze the influence of previous storm events on the hydrograph or other outside influences. # # **What Does This Show?:** # * How does the magnitude of peak event flow vary by site? # * How does the magnitude of event flow discharge change over time? # * How do the curves of event flow discharge vary from site to site? 
# + id="34ml17n6zcb5"
#%% Plotting Effective Flow Curves Over Full Duration:

#Shared (frame, colour, label) styling for the two event-flow figures.
#The Blackwater gauge is labelled by its town, Davis, for consistency
#with the rest of the notebook (the original labelled it 'Blackwater').
_event_styles = [
    (df_albright, 'navy', 'Albright'),
    (df_blackwater, 'grey', 'Davis'),
    (df_bowden, 'dodgerblue', 'Bowden'),
    (df_hendricks, 'maroon', 'Hendricks'),
    (df_parsons, 'orange', 'Parsons'),
    (df_rockville, 'darkgreen', 'Rockville'),
]

#Define Function for Variable Plotting Windows:
def eventflow_plotting(start_window, end_window):
    """Plot every gauge's event flow (cm/hr) between two index positions.

    start_window / end_window are positional indices into
    df_albright.index (negative values count from the end), bounding
    the x-axis window.
    """
    #Create Plotting Area:
    fig, ax1 = plt.subplots()
    #Plot Event Flow Data for every gauge:
    for _df, _color, _label in _event_styles:
        ax1.plot(_df['Eff Flow (cm/hr)'], ',', linestyle='-', color=_color, label=_label)
    #Axis Formatting:
    ax1.set_ylim(bottom = 0)
    ax1.set_xlim(df_albright.index[start_window], df_albright.index[end_window])
    fig.autofmt_xdate()
    #Axis Labels:
    ax1.set_ylabel('Discharge (cm/hr)', color='k', fontweight="bold", fontsize= 12)
    ax1.set_xlabel('Date', color='k', fontweight="bold", fontsize= 12)
    fig.suptitle('Event Flow Discharge Curves for Cheat River Watershed', fontweight= "bold", fontsize=18)
    #Legend:
    fig.legend(bbox_to_anchor= (1.15, 0.75))

#Function for Full Duration:
eventflow_plotting(0, -1)
#Function for Zoomed-In Duration:
eventflow_plotting(275, -675)

# + id="H7HiyL1FzfGw"
#%% Plotting Z-Scored Effective Flow Curves Over Full Duration:

#Create Function for Z-Score Plotting:
def zscore_event_plotting(start_window, end_window):
    """Plot every gauge's z-scored event flow between two index positions.

    Same windowing convention as eventflow_plotting; no y-floor because
    z-scores can be negative.
    """
    #Create Plotting Area:
    fig, ax1 = plt.subplots()
    #Plot Z-Scored Event Flow Data for every gauge:
    for _df, _color, _label in _event_styles:
        ax1.plot(_df['Z-Score EffQ'], ',', linestyle='-', color=_color, label=_label)
    #Axis Formatting:
    ax1.set_xlim(df_albright.index[start_window], df_albright.index[end_window])
    fig.autofmt_xdate()
    #Axis Labels -- BUGFIX: this figure plots z-scores, not cm/hr:
    ax1.set_ylabel('Discharge (Z-Score)', color='k', fontweight="bold", fontsize= 12)
    ax1.set_xlabel('Date', color='k', fontweight="bold", fontsize= 12)
    fig.suptitle('Z-Scored Event Discharge Curves', fontweight= "bold", fontsize=18)
    #Legend:
    fig.legend(bbox_to_anchor= (1.15, 0.75))

#Function for Full Duration:
zscore_event_plotting(0, -1)
#Function for Selected Window:
zscore_event_plotting(275, -675)

# + [markdown] id="GaIt46FmzkWx"
# ## <center> **Plotting Volumes of Total Discharge per USGS Gauge Location:**
#
# This graphical output is the creation of a bar graph plot to display how much water ran through each gauging station for the duration of the full storm event. This plot will help us to graphically represent where a certain volume of flow was located during the storm. In essence, this allows us to view how much water moved through a particular area, and such, perhaps which area the storm was centered upon.
# # # # **Effectively, this plot is a product of two variables:** # * Magnitude of Discharge # * Duration of the Storm Event # # # # Our purposes will allow us to see that even though these stream gauging locations might vary in scale, if the normalized amount of flow through a specific pattern of stations is visible in this plot, we can begin to decypher how a "pulse" of water moves thorughout a watershed during a single large-scale storm event # + id="QSO6jLf3zr3Q" #%% Plotting Total Discharge Storm Event Volumes: #Create Plotting Area: fig = plt.figure() ax = fig.add_axes([0,0,1,1]) #Add Data Bars: locations = ['Albright', 'Davis', 'Bowden', 'Hendricks', 'Parsons', 'Rockville'] discharge_totals = storm_totals ax.bar(locations, discharge_totals, color = 'dodgerblue') #Axis Labels: ax.set_ylabel('Total Discharge (cm)', color='k', fontweight="bold", fontsize= 12) ax.set_xlabel('Location of Measurement', color='k', fontweight="bold", fontsize= 12) ax.set_title('Total Storm Event Discharge Outputs', fontweight= "bold", fontsize=18) #Display Bar Plot: plt.show() # + [markdown] id="PE_6IGuAzuaQ" # ## <center> **Determine Basic Statistical Properties of Event Flow per USGS Gauge Location:** # # A list of variables per site location for the duration of the storm event. While not inherrently powerful on their own, each of these metrics provide a point of comparison across each site location. Comparing each of these metrics across the full watershed extent can provide us insight as to what observable trends are present for each of these metrics # # **Statistics Calculated:** # * Maximum Event Discharge # * Maximum Observed Discharge # * Average Event Discharge # * Average Observed Discharge # # **What Can This Tell Us?:** # * How does peak event discharge vary across numerous sites? # * How does the average event discharge vary across numerous sites? # * How do the different study site gauges vary in magnitude? 
# + id="Fz7a--G0z4MW" #%% Determine Maximum Values of Discharge: #Create Empty Arrays for Quick Reference of Values: #Maximum Discharge Values: max_q = [] max_q_zscore= [] #Maximum Event Flow Discharge Values: highest_events= [] highest_events_zscore= [] #Average Discharge: average_Q= [] average_eventQ= [] #Function for Max Discharge: def max_discharge(df): #Calculate Maximum: big_q= df['Discharge (cm/hr)'].max() #Append to List: max_q.append(big_q) #Function for Max Discahrge Z-Score: def max_discharge_zscore(df): big_q_zscore= df['Z-Score Q'].max() max_q_zscore.append(big_q_zscore) #Function for Max Discharge: def max_event(df): max_event_val= df['Eff Flow (cm/hr)'].max() highest_events.append(max_event_val) #Function for Max Discahrge Z-Score: def max_event_zscore(df): max_event_valz= df['Z-Score EffQ'].max() highest_events_zscore.append(max_event_valz) #Function for Average Event Discahrge: def avg_event_discharge(df): avg_event_valz= df['Eff Flow (cm/hr)'].mean() average_eventQ.append(avg_event_valz) #Function for Average Discahrge: def avg_discharge(df): avg_valz= df['Discharge (cm/hr)'].mean() average_Q.append(avg_valz) #For Loop to Iterate Through Gauges: for item in df_list: #Iterate Through Functions: max_discharge(item) max_discharge_zscore(item) max_event(item) max_event_zscore(item) avg_event_discharge(item) avg_discharge(item) #Output Text Statement: print('') print('For Loop Complete: Values Appended to Lists') print('') # + [markdown] id="Pu7AlfPpz-F-" # ## <center> **Statistical Evaluation of Pearson Correlation Coefficients:** # # A list of pearson coefficient curve similarity value per coupled site location for the duration of the storm event. All possible dually-coupled site location combination were utilized for the study and were named on the basis of which two sites each coupling included. A table and visiualzied bar grpah format, along with basic statistics, were produced for the collection of pearson coefficients. 
# # While not inherrently powerful on their own, each of these metrics provide a point of comparison across each site location. Comparing each of these metrics across the full watershed extent can provide us insight as to what observable trends are present between any combination of site locations. Effectively, this can provide a means of how a watershed is geographically impact by a given storm event. # # **Statistics Calculated:** # * Maximum Pearson Coefficient # * Minimum Pearson Coefficient # * Average Pearson Coefficient # # **What Can This Tell Us?:** # * How similar is a discharge curve across study sites? # * Does the location of a storm event affect the discharge curve geometry? # * How do the different study site combination pearson coefficients vary in magnitude? # + id="AHGBF6hm0Fhx" #%% Basic Insight to Pearson Coefficients: #Most Similar Gauge Profiles: max_pearson = max(pearson_array) #Output Text Statement: print('') print('The Highest Pearson Coefficient Was %6.4f' %max_pearson) print('') #Least Similar Gauge Profiles: min_pearson = min(pearson_array) #Output Text Statement: print('') print('The Lowest Pearson Coefficient Was %6.4f' %min_pearson) print('') #Average Across Watershed: #Sum of Values: sum_pearson= sum(pearson_array) #LEngth of Array as Proxy of Number of Records: len_pearson= len(pearson_array) #Mean of Array Values: avg_pearson = sum_pearson / len_pearson #Output Text Statement: print('') print('The Average Pearson Coefficient Was %6.4f' %avg_pearson) print('') # + id="QDFH3ftl20A8" #%% Bar Graph to Display Pearson Values Not in Table: #Create Plotting Area: fig = plt.figure() ax = fig.add_axes([0,0,1,1]) #Add Data Bars: locations = ref_list bars = pearson_array ax.bar(locations, bars, color = 'navy') #Axis Labels: ax.set_ylabel('Pearson Coefficient Value', color='k', fontweight="bold", fontsize= 12) ax.set_xlabel('USGS Gauge Location Combination', color='k', fontweight="bold", fontsize= 12) ax.set_title('Pearson Coefficient Values 
by Gauge Station Combination', fontweight= "bold", fontsize=18) #Display Bar Plot: plt.show() # + [markdown] id="R1OJ75Ev222v" # # <center> **Discussion of Results** # # The principle findings of this study are contingent on the geometry of the curves geometries and the storm locations. While it is obvious that discharge curves will vary by location, the results of this study more-so suggests that the discharge curves will vary at a higher degree based upon the location of the storm opposed to a gauge being located in an upstream or downstream position within the watershed. # # ## **Conclusion 1: Magnitude of Discharge is Correlated to the Magnitude of Stream** # # * In the Z-Scored time series, almost all of the curves displayed a similar curve geometry for the peak discharge value. The only exception to this trend was the Blackwater River in Davis, WV which displayed a dually-peaked curve geometry slighlty lower than that of the other gauges. While the Albright, WV gauge was slighlty delayed in its curve, the curve geometry is similar to the other four gauges. Since this is a time series, this is perhaps more indicative of the storm system moving through the watershed than anything related to the arrival of an increased volume of water. # # * The average Pearson Coefficient for all gauges was a value of 0.4729 which displays a decent amount fo correlation even after the average likely being degraded follwoing disimilarities with the Albright and Blackwater gauges. In fact, the Hendricks-Parsons combination displayed a 0.9665 coefficient which is a proxy for curves of 96.65% positive similarity; obviously, incredibly high correlation. This value is not isolated however, as 7 of the 14 gauge combination displayed a Pearson Coefficient above 0.65. 
##**Conclusion 2: Gauges in Similar Locations or Rivers Display Similar Magnitudes of Discharge**
# # * Visually, we can see that many of the discharge curves, both for cm/hr and normalized Z-Scores, display a similar progress pattern as well, with a very sharp rise on the rising limb, a pronounced peak, and then a steady exponential fall returning to baseflow conditions. The rate of exponential decay was not observed in this study as well, however the similar curve geometries proxied by the Pearson Coefficient indicate it would likely be similar. # # * The major "outlier" in terms of this storm system was that of the Blackwater River at Davis, WV. A logical explanation of why this site displayed such a different geometry compared to the other gauges is likely the storm did not progress thorugh the area in the same means. Given Davis, WV is immediately located upon the Appalachian Front, perhaps an orographic effect prohibited the same progression of the storm system. # # + id="uaMctww328ZL" #Code Cell to Advance to Next Section Header # + [markdown] id="zx6SeiOy2_q0" # # <center> **Major Conclusions of Study:** # # The goals of this study were successful in terms of evaluating how a single large storm event progresses through and affects the hydrographic response of a given watershed. While it was hypothesized that the influence of the storm event would cascade in an upstream to downstream method within the watershed, this hypothesis was proven false as no correlation seems to be apparent between the magnitude of flow and the positionality of the gauges within the watershed. # # . # # However, based upon the trends observed from the pearson coefficients of curve similarity, there is some evidence to state the driving factor of hydrologic repsonse might be associated with the path and location of the storm system opposed to the location of any particular gauge. 
This is logical, as a watershed extent can often be quite large, and especially in a topographically varied watershed such as the Cheat River watershed area, precipitation is not evenly distributed over the full extent. Different nuanced factors such as the orographic positionality (i.e. windward v. leeward), elevation, and location of the gauging site in reference to other locations are all drivers of how much rainfall a particular area did or did not recieve drinving the hydrologic response. # # . # # Additionally, it was assumed all surface conditions within the watershed were fully conservative. All flows in the river were assumed to be instanteously dissapating or cumulative and no contributions were expected to be made to groundwater; a fully conservative model of fate transport. Moving forward, to understand how these conditions might vary across numerous sites, it could be helpful to add an explicitly spatial component to this study in terms of a flow accumulation raster. In those regards, the amount of water in a stream system at any given time can be assumed to move downstream and compared to the observed value. If a consistent ratio of discharge is bieng met on a per-cell basis, then a proxy value of upstream groundwater contribution can be determined. This would greatly improve the future results of the study giving it localized spatial vaildation of results as well as understanding the role other, non-observed, influences play in the movement of event flow in the Cheat River watershed. # # . # # As it currently stands, the data produced from this study is most helpful to those in the Cheat River watershed as well as disaster relief policy and community planners. The lingering socioeconomic impacts of the 1985 Election Day Floods and other large-scale flooding events such as the observed 2019 storm event all produce a means of evaluating how the Cheat River interacts with any given storm. 
Predicting the response of the river not only provides a proxy to assist the communitites, but also provides a basis to understand how a given town will be impacted as well during a larger flooding event. Given these towns and communities are often low-priority sites for disatter relief efforts, providing validation of increased vulnerabily, or modelled and quantified vulnerability, could justify resource allocation being put towards these communities. Essentially, having the quantification of necessary resources, it is more likely that resouces would be allocated towards a community where wastefulness is an economic concern. # + id="JsVeEbJX3HHw" #Code Cell to Advance to the Next Section Header # + [markdown] id="K0wgy9ct3Lnk" # # <center> **Works Cited** # # * <NAME>., <NAME>., & <NAME>. (2007). Rainfall—runoff response, event-based runoff coefficients and hydrograph separation. Hydrological Sciences Journal, 52(5), 843-862. # # * <NAME>. (1990). Floods in West Virginia, Virginia, Pennsylvania, and Maryland, November 1985 (Vol. 88, No. 4213). US Department of the Interior, US Geological Survey. # # * <NAME>. (2018). Assessment of hydrological drought in Northern Ontario using standardized streamflow index (Doctoral dissertation). # # * <NAME>., <NAME>., <NAME>., & <NAME>. (2020). Flow-Mediated Vulnerability of Source Waters to Elevated TDS in an Appalachian River Basin. Water, 12(2), 384. # # * <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Cheat Water Resources: Assessing Climatology and Land Cover Trends and Evaluating Flood Risk of the Cheat River. # # * <NAME>., & <NAME>. (2004). Water quality variability in tributaries of the Cheat River, a mined Appalachian Watershed. In 2004 National Meeting of the American Society of Mining and Reclamation and the 25th West Virginia Surface Mine Drainage Task Force. American Society of Mining and Reclamation, Morgantown, West Virginia (pp. 1484-1504). # # * <NAME>., & <NAME>. (2003). 
Completed and future projects on the Cheat River and use of TMDL trading. In West Virginia Surface Mine Drainage Task Force Symposium, April. #
Hoheneder_ESCI895_FinalProject.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd


def compute_tax(salary, age):
    """Return the income tax due on `salary` for a person of `age`.

    Slab structure (same as the original chained-if cell):
      * tax-free exemption limit: 250,000 for age <= 60,
        300,000 for 60 < age <= 80, 500,000 for age > 80
      * salary above the exemption, up to 500,000  -> 10%
      * salary up to 1,000,000                     -> 20%
      * salary above 1,000,000                     -> 30%

    The original code left `y` undefined (NameError) for age > 100 with
    salary > 1,000,000; that gap now falls into the 30% slab like every
    other salary above 1,000,000.
    """
    # Pick the age-dependent exemption limit first, then apply the
    # salary slabs, which are identical for every age group.
    if age <= 60:
        exemption = 250000
    elif age <= 80:
        exemption = 300000
    else:
        exemption = 500000

    if salary <= exemption:
        return 0
    if salary <= 500000:
        return salary*10/100
    if salary <= 1000000:
        return salary*20/100
    return salary*30/100


if __name__ == "__main__":
    # Read the salary sheet and tax its first record, exactly as the
    # original notebook cells did.
    df = pd.read_excel("Yashin.xlsx")
    x = df.loc[0, "Salary"]
    a = df.loc[0, "Age"]
    print("Tax", compute_tax(x, a))
# -
Salary with age.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example: Attenuation of a WR-6.5 Waveguide Loaded with Different Dielectrics # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy.constants import mil, milli # Waveguide package # GitHub: https://github.com/garrettj403/Waveguide # PyPI: pip install waveguide from waveguide import conductor_loss, dielectric_loss, cutoff_frequency, np2db # Scientific Matplotlib plots (optional) # GitHub: https://github.com/garrettj403/SciencePlots # PyPI: pip install SciencePlots plt.style.use(["science", "notebook"]) # - # Waveguide dimensions for WR-6.5 a, b = 65 * mil, 65 / 2 * mil # # Dielectric: Alumina # Relative permittivity er = 10 # Cutoff frequencies print("TE10 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=1, n=0)/1e9)) print("TE20 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=2, n=0)/1e9)) # Frequency sweep f = np.linspace(29e9, 200e9, 501) fghz = f / 1e9 # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18,6)) fig.suptitle("Loss of a WR-6.5 Waveguide Loaded with Alumina", fontsize=18) cond = [2e7, 4e7, 6e7, 8e7] for _cond in cond: alpha_c = conductor_loss(f, _cond, a, b, er=er) ax1.plot(fghz, alpha_c, label=r"$\sigma={:.0f}\times10^7$ S/m".format(_cond/1e7)) ax1.legend(title=r"$\alpha_c$, $\varepsilon_r=10$") ax1.set_xlabel("Frequency (GHz)") ax1.set_ylabel("Conductor attenuation (Np/m)") ax1.set_xlim([29, 200]) ax1.set_ylim([0, 2.1]) tand = [1e-5, 5e-5, 1e-4, 2e-4] tand.reverse() for _tand in tand: _er = er * (1 - 1j * _tand) alpha_d = dielectric_loss(f, a, b=b, er=_er) ax2.plot(fghz, alpha_d, label=r"$\tan\,\delta={:.0f}\times10^{{-4}}$".format(_tand*1e4)) ax2.legend(title=r"$\alpha_d$, $\varepsilon_r=10$") ax2.set_xlabel("Frequency (GHz)") ax2.set_ylabel("Dielectric attenuation 
(Np/m)") ax2.set_xlim([29, 200]) ax2.set_ylim([0, 2.1]) fig.savefig("results/waveguide-attenuation-wr-6.5-alumina.png", dpi=400); # - # # Dielectric: HDPE # Relative permittivity er = 2.3 # Cutoff frequencies print("TE10 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=1, n=0)/1e9)) print("TE20 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=2, n=0)/1e9)) # Frequency sweep f = np.linspace(60e9, 200e9, 501) fghz = f / 1e9 # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18,6)) fig.suptitle("Loss of a WR-6.5 Waveguide Loaded with HDPE", fontsize=18) cond = [2e7, 4e7, 6e7, 8e7] for _cond in cond: alpha_c = conductor_loss(f, _cond, a, b, er=er) ax1.plot(fghz, alpha_c, label=r"$\sigma={:.0f}\times10^7$ S/m".format(_cond/1e7)) ax1.legend(title=r"$\alpha_c$, $\varepsilon_r=2.3$", loc=1) ax1.set_xlabel("Frequency (GHz)") ax1.set_ylabel("Conductor attenuation (Np/m)") ax1.set_xlim([60, 200]) ax1.set_ylim([0, 2]) tand = [1e-5, 5e-5, 1e-4, 2e-4] tand.reverse() for _tand in tand: _er = er * (1 - 1j * _tand) alpha_d = dielectric_loss(f, a, b=b, er=_er) ax2.plot(fghz, alpha_d, label=r"$\tan\,\delta={:.0f}\times10^{{-4}}$".format(_tand*1e4)) ax2.legend(title=r"$\alpha_d$, $\varepsilon_r=2.3$") ax2.set_xlabel("Frequency (GHz)") ax2.set_ylabel("Dielectric attenuation (Np/m)") ax2.set_xlim([60, 200]) ax2.set_ylim([0, 2]) fig.savefig("results/waveguide-attenuation-wr-6.5-hdpe.png", dpi=400); # - # # Dielectric: HDPE at 4K # Relative permittivity er = 2.4 # Cutoff frequencies print("TE10 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=1, n=0)/1e9)) print("TE20 cutoff: {:.1f} GHz".format(cutoff_frequency(a, b=b, er=er, m=2, n=0)/1e9)) # Frequency sweep f = np.linspace(110e9, 170e9, 201) fghz = f / 1e9 # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18,6)) fig.suptitle("Loss of a 49 mm WR-6.5 Waveguide Loaded with HDPE", fontsize=18) cond = [6e7, 7e7, 8e7, 9e7, 10e7] for _cond in cond: alpha_c = conductor_loss(f, _cond, a, b, er=er) 
ax1.plot(fghz, np2db(alpha_c) * 49 * milli, label=r"$\sigma={:.0f}\times10^7$ S/m".format(_cond/1e7)) ax1.legend(title=r"$\alpha_c$, $\varepsilon_r={:.1f}$".format(er), loc=1) ax1.set_xlabel("Frequency (GHz)") ax1.set_ylabel("Conductor Loss (dB)") ax1.set_xlim([fghz.min(), fghz.max()]) ax1.set_ylim([0, 1]) tand = [1e-5, 2e-5, 3e-5, 4e-5, 5e-5] tand.reverse() for _tand in tand: _er = er * (1 - 1j * _tand) alpha_d = dielectric_loss(f, a, b=b, er=_er) ax2.plot(fghz, np2db(alpha_d) * 49 * milli, label=r"$\tan\,\delta={:.0f}\times10^{{-5}}$".format(_tand*1e5)) ax2.legend(title=r"$\alpha_d$, $\varepsilon_r={:.1f}$".format(er)) ax2.set_xlabel("Frequency (GHz)") ax2.set_ylabel("Dielectric Loss (dB)") ax2.set_xlim([fghz.min(), fghz.max()]) ax2.set_ylim([0, 1]) fig.savefig("results/waveguide-attenuation-wr-6.5-hdpe-db.png", dpi=400);
examples/wr-6.5-waveguide-attenuation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# Draw 10,000 samples from a normal distribution centred at 100 with a
# standard deviation of 50, then visualise their spread as a histogram.
income_samples = np.random.normal(loc=100.0, scale=50.0, size=10000)
plt.hist(income_samples, bins=50)
plt.show()
# -

# Sample standard deviation of the draws (should be close to 50).
income_samples.std()

# Sample variance of the draws (should be close to 50**2 == 2500).
income_samples.var()
day12/1 stddevvariance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Spooky Author Identification exercise # # Spooky authors dataset. Mоже да се изтегли от тук: # https://www.kaggle.com/c/spooky-author-identification # # #### Изводи # # След направените анализи в/у feature-те по време на лекцията на курса можем да стигнем до следните наблюдения и изводи: # - ... # - ... # # Също така можем да опитаме някакъв feature engineering, "почиставне" и обработка на данните: # - ... # - ... # # ### Стратегия # - ... # - ... # #### Започваме # # ... # %config IPCompleter.greedy=True # !pip install numpy scipy matplotlib ipython scikit-learn pandas pillow mglearn # ... # + import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import mglearn from IPython.display import display # %matplotlib inline # - # ... # + import pandas as pd train = pd.read_csv("data/spooky-authors/train.zip", index_col=['id']) test = pd.read_csv("data/spooky-authors/test.zip", index_col=['id']) sample_submission = pd.read_csv("data/spooky-authors/sample_submission.zip", index_col=['id']) print(train.shape, test.shape, sample_submission.shape) print(set(train.columns) - set(test.columns)) # - train.head(5) # ... 
# !pip install nltk
import nltk
nltk.download('stopwords')

# English stop-word list. The original cell referenced an undefined name
# `stopwords` inside `params` (the import below was commented out), which
# raised a NameError before the search could even start.
from nltk.corpus import stopwords
stopset = list(stopwords.words('english'))

# Hyper-parameter distributions for the randomized search over the
# TfidfVectorizer ('features') and MultinomialNB ('clf') pipeline steps.
params = {"features__ngram_range": [(1,1), (1,2), (1,3)],
          "features__analyzer": ['word'],
          "features__max_df": [1.0, 0.9, 0.8, 0.7, 0.6, 0.5],
          "features__min_df": [2, 3, 5, 10],
          "features__lowercase": [False, True],
          "features__stop_words": [None, stopset],
          "features__token_pattern": [r'\w+|\,', None],
          "clf__alpha": [0.01, 0.1, 0.5, 1, 2]
          }

from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import log_loss


def report(results, n_top=5):
    """Print mean/std CV score and parameters of the `n_top` best
    candidates from a (Randomized)SearchCV `cv_results_` mapping."""
    for i in range(1, n_top + 1):
        candidates = np.flatnonzero(results['rank_test_score'] == i)
        for candidate in candidates:
            print("Model with rank: {0}".format(i))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                  results['mean_test_score'][candidate],
                  results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")


# +
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score

# Baseline pipeline; `stop_words=stopset` previously crashed with a
# NameError because the `stopset` definition was commented out above.
pipeline = Pipeline([
    ('features', TfidfVectorizer(
        min_df=2,
        lowercase=False,
        stop_words=stopset
    )),
    ('clf', MultinomialNB()),
])

# Randomized search over `params`, scored with negative log-loss
# (the Kaggle competition metric).
random_search = RandomizedSearchCV(pipeline, param_distributions=params,
                                   scoring='neg_log_loss',
                                   n_iter=20, cv=3, n_jobs=4)
random_search.fit(train.text, train.author)
report(random_search.cv_results_)
# -

pipeline = pipeline.fit(train.text, train.author)
print(pipeline.predict_proba(test[:10].text))

test_predictions = pipeline.predict_proba(test.text)
print(pipeline.classes_)

# predict_proba columns follow pipeline.classes_ (labels sorted:
# EAP, HPL, MWS). The original hard-coded ['EAP', 'MWS', 'HPL'],
# silently swapping the HPL and MWS probability columns in the
# submission file; using classes_ keeps labels and columns aligned.
submit_file = pd.DataFrame(test_predictions, columns=pipeline.classes_,
                           index=test.index)
submit_file.head(10)

submit_file.to_csv("data/spooky-authors/predictions.csv")
03-hw-spooky-authors-exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Working with Instrumental Descriptions # # the instrumental description is loaded by the event source, and consists of a hierarchy of classes in the ctapipe.instrument module, the base of which is the `SubarrayDescription` # First, let's open a file and load a single event so we get the instrument info in the event.inst container. # + from ctapipe.utils.datasets import get_dataset from ctapipe.io.hessio import hessio_event_source import numpy as np #filename = get_dataset("gamma_test_large.simtel.gz") # try this one as well filename = get_dataset("gamma_test.simtel.gz") source = hessio_event_source(filename) event = next(source) del source # close the file # - # ### now let's play with the SubarrayDescription: # + subarray = event.inst.subarray subarray.info() # - subarray.to_table() # You can also get a table of just the `OpticsDescriptions` (`CameraGeometry` is more complex and can't be stored on a single table row, so each one can be converted to a table separately) subarray.to_table(kind='optics') # Make a sub-array with only SC-type telescopes: tab = subarray.to_table() sc_tels = tab[tab['mirror_type']=='SC']['tel_id'] # select tel_id of entries where the mirror type is SC newsub = subarray.select_subarray("SCTels", sc_tels) newsub.info() # can also do this by using `Table.group_by` gtab = tab.group_by('mirror_type') sc = gtab.groups[1] newsub = subarray.select_subarray("SCTels", sc['tel_id']) newsub.info() # ### Explore some of the details of the telescopes tel = subarray.tel[5] tel tel.optics.mirror_area tel.optics.num_mirror_tiles tel.optics.effective_focal_length tel.camera tel.camera.pix_x # %matplotlib inline from ctapipe.visualization import CameraDisplay CameraDisplay(tel.camera) CameraDisplay(subarray.tel[98].camera) # ## Plot the 
subarray # # We'll make a subarray by telescope type and plot each separately, so they appear in different colors. We also calculate the radius using the mirror area (and exagerate it a bit). # # This is just for debugging and info, for any "real" use, a `visualization.ArrayDisplay` should be used subarray.peek() subarray.footprint # ### With Pandas # If you prefer working with *Pandas* `DataFrames` instead of *AstroPy* `Tables`, you can always convert between the two: df = subarray.to_table().to_pandas() df.set_index('tel_id') g = df.groupby('tel_description') g.groups g.groups['LST:LSTCam'] df.loc[g.groups['LST:LSTCam']] lsts = subarray.select_subarray("LSTs", df.loc[g.groups['LST:LSTCam']]['tel_id']) lsts.info() lsts.peek() lsts.footprint
examples/notebooks/InstrumentDescription.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="zHnIZCXA8foC" colab_type="text" # #Calteh Birds Classification # This Project is done using Google Colaboratory. 1st you have to mount google drive with Colab and place dataset in zipped format on Google drive to access it. This dataset contains birds images of 200 categories. Training images are 5994 & test images are 5794. I have achieved top1 accuracy 77.32% and top5 accuracy 94.30% on test set. This project is done using a ResNet18(pretrained on imagenet). # + id="Wr5c0m2Y8o-Y" colab_type="code" outputId="341c549a-cb73-4501-a49a-30abb02324e0" executionInfo={"status": "ok", "timestamp": 1586113089699, "user_tz": -120, "elapsed": 15715, "user": {"displayName": "<NAME>\u00e1nchez", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhXK3QIvG6hvwAAcWHNiK70IOW8eLF8nMd6pm19Qg=s64", "userId": "05070611291973281760"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # !unzip drive/My\ Drive/TFM/Birds-Task-PyTorch/CUB_200_PyTorch.zip # this line copies the birds dataset from Google drive to Google Colab and also Unzip it for further processing. 
# + id="lQjl7iri8eti" colab_type="code" outputId="300d36ae-5aac-4576-ec10-c0dd521b715f" executionInfo={"status": "ok", "timestamp": 1586113093820, "user_tz": -120, "elapsed": 19239, "user": {"displayName": "<NAME>\u00e1nchez", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhXK3QIvG6hvwAAcWHNiK70IOW8eLF8nMd6pm19Qg=s64", "userId": "05070611291973281760"}} colab={"base_uri": "https://localhost:8080/", "height": 51} # here are the necessary imports from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import seaborn as sns plt.ion() # + [markdown] id="-CagfeVv8mxe" colab_type="text" # #Load Data # We will use torchvision and torch.utils.data packages for loading the data. For the training, i have applied transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. I also made it sure that the input data is resized to 224x224 pixels as required by the pre-trained networks. # # The testing set is used to measure the model's performance on data it hasn't seen yet. For this i have not performed any scaling or rotation transformations, but i had resized and then cropped the images to the appropriate size. # # The pre-trained network i have used was trained on the ImageNet dataset where each color channel was normalized separately. For all three sets i have normalized the means and standard deviations of the images to what the network expects. For the means, it's [0.485, 0.456, 0.406] and for the standard deviations [0.229, 0.224, 0.225], calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1. 
# + id="PSjWM1KB8ewO" colab_type="code" colab={} # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.Resize(256), transforms.RandomRotation(45), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = 'CUB_200_PyTorch' # loading datasets with PyTorch ImageFolder image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'test']} # defining data loaders to load data using image_datasets and transforms, here we also specify batch size for the mini batch dataloders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=8, shuffle=True, num_workers=4) for x in ['train', 'test']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']} class_names = image_datasets['train'].classes use_gpu = torch.cuda.is_available() # + id="s-eI_ao386lV" colab_type="code" outputId="05d8405b-595c-4f1a-ccce-9805e7ef85fa" executionInfo={"status": "ok", "timestamp": 1586113093821, "user_tz": -120, "elapsed": 17421, "user": {"displayName": "<NAME>\u00e1nchez", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhXK3QIvG6hvwAAcWHNiK70IOW8eLF8nMd6pm19Qg=s64", "userId": "05070611291973281760"}} colab={"base_uri": "https://localhost:8080/", "height": 34} dataset_sizes # + [markdown] id="ZIaFNGkS86nl" colab_type="text" # #Visualize a few images # Let's visualize a few training images so as to understand the data augmentations. 
# + id="S1zNAEW386p0" colab_type="code" outputId="ce219e21-b831-4ed0-91d8-8a97b36d8efe" executionInfo={"status": "ok", "timestamp": 1586113095130, "user_tz": -120, "elapsed": 17994, "user": {"displayName": "<NAME>\u00e1nchez", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhXK3QIvG6hvwAAcWHNiK70IOW8eLF8nMd6pm19Qg=s64", "userId": "05070611291973281760"}} colab={"base_uri": "https://localhost:8080/", "height": 106} def imshow_tensors(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, classes = next(iter(dataloders['train'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow_tensors(out, title=[class_names[x] for x in classes]) # + [markdown] id="1Ccj00jo86sP" colab_type="text" # #Training the model # Now, let's write a general function to train a model. I also have written code to save the best checkpoint within Google drive for using next time # + id="tUG_ET7g86ui" colab_type="code" colab={} def train_model(model, criterion, optimizer, num_epochs=10): since = time.time() best_model_wts = model.state_dict() best_acc = 0.0 train_losses, train_accuracies = [], [] test_losses, test_accuracies = [], [] for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'test']: if phase == 'train': #scheduler.step() model.train(True) # Set model to training mode losses_list, accuracies_list = train_losses, train_accuracies else: model.train(False) # Set model to evaluate mode losses_list, accuracies_list = test_losses, test_accuracies running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for data in dataloders[phase]: # get the inputs inputs, labels = data # wrap them in Variable if use_gpu: inputs = Variable(inputs.cuda()) labels = Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) # zero the parameter gradients optimizer.zero_grad() # forward outputs = model(inputs) _, preds = torch.max(outputs.data, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.data.item() #loss.data[0] running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.float() / dataset_sizes[phase] losses_list.append(epoch_loss) accuracies_list.append(epoch_acc) print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'test' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = model.state_dict() state = {'model':model_ft.state_dict(),'optim':optimizer_ft.state_dict()} # torch.save(state,'./drive/My Drive/TFM/Birds-Task-PyTorch/point_resnet18_best.pth') torch.save(state,'./drive/My Drive/TFM/Birds-Task-PyTorch/main_resnet50_best.pth') print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best test Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) # Return best model and statistics train_losses, train_accuracies = np.array(train_losses), np.array(train_accuracies) test_losses, test_accuracies = np.array(test_losses), np.array(test_accuracies) stats = {'train_loss':train_losses, 'train_acc':train_accuracies, 'test_loss':test_losses, 'test_acc':test_accuracies} return model, stats # + [markdown] id="2UTKs3t3tlwB" colab_type="text" # #Visualizing the model predictions # Generic function to display predictions for a few images # + id="SXk5xoxXtlzA" colab_type="code" colab={} 
def visualize_model(model, num_images=8):
    """Show `num_images` test images titled with their true and predicted classes."""
    images_so_far = 0
    fig = plt.figure()
    for i, data in enumerate(dataloders['test']):
        inputs, labels = data
        # Move the batch to the GPU when one is available.
        if use_gpu:
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        else:
            inputs, labels = Variable(inputs), Variable(labels)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)  # predicted class index per image
        for j in range(inputs.size()[0]):
            images_so_far += 1
            ax = plt.subplot(num_images // 2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('class: {} predicted: {}'.format(class_names[labels.data[j]],
                                                          class_names[preds[j]]))
            imshow_tensors(inputs.cpu().data[j])
            if images_so_far == num_images:
                return

# + [markdown]
# # Finetuning the convnet
# Load a pretrained Resnet 18 model and reset final fully connected layer.

# +
# model_ft = models.resnet18(pretrained=True)  # ImageNet-pretrained alternative
model_ft = models.resnet50(pretrained=False)
num_ftrs = model_ft.fc.in_features
# Replace the classifier head: this dataset has 200 bird categories.
model_ft.fc = nn.Linear(num_ftrs, 200)

if use_gpu:  # if a GPU is available then use it
    model_ft = model_ft.cuda()

criterion = nn.CrossEntropyLoss()  # defining loss function
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.0001, momentum=0.9)

# + [markdown]
# You can load a checkpoint from your drive or any other place if you have saved it.
# You have to load the weights of both the model and the optimizer.

# +
checkpoint = torch.load('./drive/My Drive/TFM/Birds-Task-PyTorch/main_resnet50_best.pth')
model_ft.load_state_dict(checkpoint['model'])
# optimizer_ft.load_state_dict(checkpoint['optim'])
model_ft.eval()

# + [markdown]
# # Train and evaluate

# +
model_ft, stats = train_model(model_ft, criterion, optimizer_ft, num_epochs=200)

# +
# SHOW LOSS AND ACCURACIES CURVES
plt.plot(stats['train_loss'], label='Train')
plt.plot(stats['test_loss'], label='Test')
plt.title('Epoch loss')
plt.legend()
plt.show()

plt.plot(stats['train_acc'], label='Train')
plt.plot(stats['test_acc'], label='Test')
plt.title('Epoch accuracy')
plt.legend()
plt.show()

# + [markdown]
# # Checking Model's Predictions

# +
visualize_model(model_ft)

# + [markdown]
# # Finding Top-1 & Top-5 accuracy

# +
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # `n` is the number of samples that `val` was averaged over.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # `correct[:k]` comes from a transpose and is non-contiguous, so use
        # reshape(-1): view(-1) raises a RuntimeError on recent PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def calc_accuracy(model, data):
    """Return (top1, top5) AverageMeters measured over dataloders[data]."""
    model.eval()
    if use_gpu:
        model.cuda()
    top1 = AverageMeter()
    top5 = AverageMeter()
    for idx, (inputs, labels) in enumerate(dataloders[data]):
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        # Call the model directly (not .forward) so registered hooks also run.
        outputs = model(Variable(inputs))
        prec1, prec5 = accuracy(outputs, Variable(labels), topk=(1, 5))
        top1.update(prec1[0], inputs.size(0))
        top5.update(prec5[0], inputs.size(0))
    return top1, top5


top1, top5 = calc_accuracy(model_ft, 'test')

# +
top1.avg, top5.avg

# + [markdown]
# ## Top-1 & Top-5 for train

# +
top1, top5 = calc_accuracy(model_ft, 'train')
top1.avg, top5.avg

# + [markdown]
# # Inference for classification

# +
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    # Open the image
    from PIL import Image
    img = Image.open(image_path)
    # Resize so that the SHORTEST side becomes 256 pixels.
    if img.size[0] > img.size[1]:
        img.thumbnail((10000, 256))
    else:
        img.thumbnail((256, 10000))
    # Center-crop to 224x224.
    left_margin = (img.width - 224) / 2
    bottom_margin = (img.height - 224) / 2
    right_margin = left_margin + 224
    top_margin = bottom_margin + 224
    img = img.crop((left_margin, bottom_margin, right_margin, top_margin))
    # Normalize with the ImageNet statistics.
    img = np.array(img) / 255
    mean = np.array([0.485, 0.456, 0.406])  # provided mean
    std = np.array([0.229, 0.224, 0.225])  # provided std
    img = (img - mean) / std
    # Move color channels to first dimension as expected by PyTorch
    img = img.transpose((2, 0, 1))
    return img


def imshow(image, ax=None, title=None):
    """Display a (C, H, W) normalized image array, undoing the preprocessing."""
    if ax is None:
        fig, ax = plt.subplots()
    if title:
        plt.title(title)
    # PyTorch tensors assume the color channel is first,
    # but matplotlib assumes it is the third dimension.
    image = image.transpose((1, 2, 0))
    # Undo preprocessing
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean
    # Image needs to be clipped between 0 and 1
    image = np.clip(image, 0, 1)
    ax.imshow(image)
    return ax

# +
os.listdir('CUB_200_PyTorch/test/001.Black_footed_Albatross/')

# +
image_path = 'CUB_200_PyTorch/test/001.Black_footed_Albatross/Black_Footed_Albatross_0001_796111.jpg'
img = process_image(image_path)
imshow(img)

# + [markdown]
# # Predict Top-k

# +
def predict(image_path, model, top_num=5):
    """Return the top `top_num` (probabilities, class names) for one image."""
    # Process image
    img = process_image(image_path)
    # Numpy -> Tensor
    image_tensor = torch.from_numpy(img).type(torch.FloatTensor)
    # Add batch of size 1 to image
    model_input = image_tensor.unsqueeze(0)
    # Call the model directly (not .forward) so registered hooks also run.
    output = model(Variable(model_input.cuda()))
    probs = torch.softmax(output, dim=1)
    # Top probs
    top_probs, top_labs = probs.topk(top_num)
    top_probs, top_labs = top_probs.data, top_labs.data
    top_probs = top_probs.cpu().numpy().tolist()[0]
    top_labs = top_labs.cpu().numpy().tolist()[0]
    top_birds = [class_names[lab] for lab in top_labs]
    return top_probs, top_birds


def plot_main_prediction(image_path, model):
    """Show the image together with a bar chart of its top-5 predictions."""
    # Set up plot
    plt.figure(figsize=(6, 10))
    ax = plt.subplot(2, 1, 1)
    # Use the class directory name as the title.
    title_ = image_path.split('/')[2]
    # Plot the image
    img = process_image(image_path)
    imshow(img, ax, title=title_)
    # Make prediction
    probs, birds = predict(image_path, model)
    # Plot bar chart
    plt.subplot(2, 1, 2)
    sns.barplot(x=probs, y=birds, color=sns.color_palette()[0])
    plt.show()

# +
plot_main_prediction(image_path, model_ft)

# +

# +
Birds-Task-PyTorch/ResNet-Main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import re
import tensorflow as tf
import numpy as np
import time
import os
import collections
import random
import nltk
import time
import pickle


# +
def clearstring(string):
    # Keep only letters, hyphens and spaces, then tokenize on spaces.
    cleaned = re.sub('[^A-Za-z\- ]+', '', string)
    tokens = [tok.strip() for tok in cleaned.split(' ') if tok]
    # Keep lowercase words longer than 3 chars, dropping HTML 'nbsp' residue.
    return [tok.lower() for tok in tokens if len(tok) > 3 and tok.find('nbsp') < 0]


def build_dataset(words, n_words):
    """Map `words` to integer ids, keeping the n_words - 1 most frequent words.

    Returns (data, count, dictionary, reversed_dictionary) where `data` is the
    corpus as ids, id 0 being reserved for the out-of-vocabulary token 'UNK'.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = {}
    for word, _ in count:
        dictionary[word] = len(dictionary)
    unk_count = 0
    data = []
    for word in words:
        idx = dictionary.get(word, 0)
        unk_count += idx == 0  # id 0 means the word fell out of vocabulary
        data.append(idx)
    count[0][1] = unk_count
    reversed_dictionary = {v: k for k, v in dictionary.items()}
    return data, count, dictionary, reversed_dictionary


# Cursor into the corpus, advanced by generate_batch below.
data_index = 0

# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    # Build one skip-gram training batch from the global corpus `data`,
    # advancing the global cursor `data_index` (wrapping at corpus end).
    # `batch` holds each center-word id repeated `num_skips` times and
    # `labels` the matching sampled context-word ids.
    global data_index
    global data
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1 # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    if data_index + span > len(data):
        data_index = 0
    buffer.extend(data[data_index:data_index + span])
    data_index += span
    for i in range(batch_size // num_skips):
        # Every position in the window except the center is a candidate context.
        context_words = [w for w in range(span) if w != skip_window]
        words_to_use = random.sample(context_words, num_skips)
        for j, context_word in enumerate(words_to_use):
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[context_word]
        if data_index == len(data):
            # Reached the corpus end: refill the sliding window from the start.
            for word in data[:span]:
                buffer.append(word)
            data_index = span
        else:
            # Slide the window one word to the right (deque drops the oldest).
            buffer.append(data[data_index])
            data_index += 1
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
# -

# Read and tokenize the corpus, keeping only cleaned lowercase words.
with open('books/Goblet-Of-Fire.txt', 'r',encoding = "ISO-8859-1") as fopen:
    vocabulary = clearstring(' '.join(nltk.word_tokenize(fopen.read())))
print('example 10 words:',vocabulary[:10])
print('size corpus:',len(vocabulary))
# Vocabulary covers every distinct word (no frequency cutoff).
vocabulary_size = len(list(set(vocabulary)))
print('size of unique words:',vocabulary_size)

# Skip-gram hyper-parameters.
dimension = 128    # embedding vector size
skip_window = 1    # context words considered on each side of the center
num_skips = 2      # context samples drawn per center word
batch_size = 64
location = os.getcwd()

data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

# Validation words: random ids among the 100 most frequent words, used only
# to print nearest neighbours while training.
valid_size = 16
valid_window = 100
valid_examples = np.random.choice(valid_window, valid_size, replace=False)

# +
graph = tf.Graph()
with graph.as_default():
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, dimension], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, dimension],
                                                      stddev=1.0 / np.sqrt(dimension)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # NCE loss with negative sampling.  `num_sampled` must be an integer:
    # under Python 3 `batch_size / 2` is a float and tf.nn.nce_loss rejects
    # it, so use floor division.
    loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,
                                         biases=nce_biases,
                                         labels=train_labels,
                                         inputs=embed,
                                         num_sampled=batch_size // 2,
                                         num_classes=vocabulary_size))
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)

    init = tf.global_variables_initializer()

num_steps = 200000
# -

with tf.Session(graph=graph) as session:
    init.run()
    print('Initialized')
    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000  # mean loss over the last 2000 steps
            print('Average loss at step ', step, ': ', average_loss)
            average_loss = 0
        if step % 10000 == 0:
            # Periodically report nearest neighbours of the validation words.
            sim = similarity.eval()
            for i in range(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # Skip index 0: the closest embedding is the word itself.
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = 'Nearest to %s:' % valid_word
                for k in range(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = '%s %s,' % (log_str, close_word)
                print(log_str)
    final_embeddings = normalized_embeddings.eval()

# Persist the vocabulary and the trained (L2-normalised) embeddings.
with open('goblet-list.p', 'wb') as fopen:
    pickle.dump(list(reverse_dictionary.values()), fopen)
with open('goblet-vector.p', 'wb') as fopen:
    pickle.dump(final_embeddings, fopen)
huseinhouse.com/Word-Calculator/word-valculator-server/vector-goblet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Glyph objects # Logomaker uses the [glyph](https://logomaker.readthedocs.io/en/latest/Glyph.html) class to render individual glyphs. This class also allows the user to customize each glyph according to their needs. We begin by importing useful packages # + import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline plt.ion() import logomaker from logomaker import Glyph # - # As an example, the following figure shows how to raw a single character by using the Glyph class. # + fig, ax = plt.subplots(figsize=[7,3]) # set bounding box ax.set_xlim([0,2]) ax.set_ylim([0,1]) Glyph(c='A', p=1,ceiling=1.0,floor=0.0, ax=ax) # - # In the code snippet above, we have placed a single glyph 'A' at a specified position `p`, with in a specified bounding box (specified by a Matplotlib Axes object `ax`). We now use the same bounding box as above, and use additional keywords to customize the glyph's appearance, and location within the bounding box. 
# + fig, ax = plt.subplots(figsize=[7,3]) # set bounding box ax.set_xlim([0,2]) ax.set_ylim([0,1]) Glyph(c='A', p=1, width= 1.0, alpha=.85, ax = ax, ceiling=0.9, floor=0.05, font_name = 'BiauKai', color='dodgerblue', edgecolor='red', edgewidth=4) # - # We now place two glyphs next to each other and demonstrate the use of the keyword `flip`: # + fig, ax = plt.subplots(figsize=[7,3]) # set bounding box ax.set_xlim([0,2]) ax.set_ylim([0,1]) Glyph(c='A', p=0.5, width= 1.0, alpha=.85, ax = ax, ceiling=0.9, floor=0.05, font_name = 'BiauKai', color='dodgerblue', edgecolor='red', edgewidth=4) Glyph(c='A', p=1.3, width= 1.25, alpha=.85, ax = ax, ceiling=0.9, floor=0.05, font_name = 'Kai', color='black', edgewidth=4, flip=True) # - # ## Generate Logo from strings # # We can use the method [sequence_to_matrix](https://logomaker.readthedocs.io/en/latest/matrix.html#sequence-to-matrix) to generate logos from a user-specified string as follows: df = logomaker.sequence_to_matrix('LOGOMAKER') logo = logomaker.Logo(df) # We can now use the method [style_single_glyph](https://logomaker.readthedocs.io/en/latest/Logo.html#logomaker.Logo.style_single_glyph) to style single glyphs in the logo generated from the user string. df = logomaker.sequence_to_matrix('LOGOMAKER') logo = logomaker.Logo(df) logo.style_single_glyph(5,'A',font_name='Comic Sans MS', color='dodgerblue', floor=.5)
logomaker/tutorials/6_glyph_objects.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf import matplotlib.pyplot as plt from math import sqrt import QGOpt as qgo import numpy as np from tqdm import tqdm # #### Parameters of an experiment. #=============================================# num_of_qubits = 2 # number of qubits num_of_meas = 600000 # number of measurements #=============================================# # #### Many qubits IC POVM. # + def kron(A, B): """Kronecker product of two tensors Args: A: tf tensor of shape (q, n, k) B: tf tensor of shape (p, m, l) Returns: tf tensor of shape (q * p, n * m, k * l)""" AB = tf.tensordot(A, B, axes=0) AB = tf.transpose(AB, (0, 3, 1, 4, 2, 5)) shape = AB.shape AB = tf.reshape(AB, (shape[0] * shape[1], shape[2] * shape[3], shape[4] * shape[5])) return AB # Pauli matrices sigma_x = tf.constant([[0, 1], [1, 0]], dtype=tf.complex128) sigma_y = tf.constant([[0 + 0j, -1j], [1j, 0 + 0j]], dtype=tf.complex128) sigma_z = tf.constant([[1, 0], [0, -1]], dtype=tf.complex128) # All Pauli matrices in one tensor sigma = tf.concat([sigma_x[tf.newaxis], sigma_y[tf.newaxis], sigma_z[tf.newaxis]], axis=0) # coordinates of thetrahedron peaks s0 = tf.constant([0, 0, 1], dtype=tf.complex128) s1 = tf.constant([2 * sqrt(2) / 3, 0, -1/3], dtype=tf.complex128) s2 = tf.constant([-sqrt(2) / 3, sqrt(2 / 3), -1 / 3], dtype=tf.complex128) s3 = tf.constant([-sqrt(2) / 3, -sqrt(2 / 3), -1 / 3], dtype=tf.complex128) # coordinates of thetrahedron peaks in one tensor s = tf.concat([s0[tf.newaxis], s1[tf.newaxis], s2[tf.newaxis], s3[tf.newaxis]], axis=0) # One qubit thetrahedral POVM M = 0.25 * (tf.eye(2, dtype=tf.complex128) + tf.tensordot(s, sigma, axes=1)) # Many qubits POVM Mmq = M for _ in range(num_of_qubits - 1): Mmq = kron(Mmq, M) # - # #### Data set generation (measurement outcomes 
simulation). # + # random Kraus set V = tf.random.normal((2 ** (3 * num_of_qubits), 2 ** num_of_qubits, 2), dtype=tf.float64) V = qgo.manifolds.real_to_complex(V) V, _ = tf.linalg.qr(V) V = tf.reshape(V, (2 ** (2 * num_of_qubits), 2 ** num_of_qubits, 2 ** num_of_qubits)) # Bell state bell_psi = tf.eye(2 ** num_of_qubits, dtype=tf.complex128) bell_rho = tf.tensordot(bell_psi, tf.math.conj(bell_psi), axes=0) # true Choi matrix choi_true = tf.einsum('qij,qkl,ajbl->aibk', V, tf.math.conj(V), bell_rho) choi_true = tf.reshape(choi_true, (2 ** (2 * num_of_qubits), 2 ** (2 * num_of_qubits))) # random initial dens. matrices psi_set = tf.random.normal((num_of_meas, 2 * num_of_qubits, 2), dtype=tf.float64) psi_set = qgo.manifolds.real_to_complex(psi_set) psi_set = psi_set / tf.linalg.norm(psi_set, axis=1)[:, tf.newaxis] rho_set = psi_set[..., tf.newaxis] * tf.math.conj(psi_set[:, tf.newaxis]) # dens. matrices after aplication of a random channel out_rho = tf.einsum('kij,klm,qjm->qil', V, tf.math.conj(V), rho_set) # Measurements simulation P = tf.cast(tf.einsum('qjk,pkj->pq', Mmq, out_rho), dtype=tf.float64) eps = tf.random.uniform((num_of_meas, 2 ** (2 * num_of_qubits)), dtype=tf.float64) eps = -tf.math.log(-tf.math.log(eps)) ind_set = tf.math.argmax(eps + tf.math.log(P), axis=-1) # Data set (projectors came true) M_set = tf.gather_nd(Mmq, ind_set[:, tf.newaxis]) true_llh = -tf.reduce_sum(tf.math.log(tf.linalg.trace(out_rho @ M_set))) # - # #### Optimization loop maximizes the logarithmic likelihood function. 
# + # Parameters of learning #=========================================# lr = 0.07 # learning rate num_of_iter = 400 # number of iterations #=========================================# # Initial choi matrix choi = tf.random.normal((2 ** (3 * num_of_qubits), 2 ** num_of_qubits, 2), dtype=tf.float64) choi = qgo.manifolds.real_to_complex(choi) choi, _ = tf.linalg.qr(choi) choi = tf.transpose(choi) choi = tf.reshape(choi, (2 ** (2 * num_of_qubits), 2 ** (2 * num_of_qubits))) choi = qgo.manifolds.complex_to_real(choi) choi = tf.Variable(choi) # optimizer initialization m = qgo.manifolds.ChoiMatrix() # S++ manifold opt = qgo.optimizers.RAdam(m, lr) # riemannian optimizer dist = [] # to be filled by trace distance vs iteration losses = [] # to be filled by loss vs iteration for _ in tqdm(range(num_of_iter)): with tf.GradientTape() as tape: choi_c = qgo.manifolds.real_to_complex(choi) choi_c = tf.reshape(choi_c, (2 ** num_of_qubits, 2 ** num_of_qubits, 2 ** (2 * num_of_qubits))) choi_c = tf.tensordot(choi_c, tf.math.conj(choi_c), [[2], [2]]) current_choi = tf.reshape(choi_c, (2 ** (2 * num_of_qubits), 2 ** (2 * num_of_qubits))) choi_c = tf.transpose(choi_c, (1, 3, 0, 2)) choi_c = tf.reshape(choi_c, (2 ** (2 * num_of_qubits), 2 ** (2 * num_of_qubits))) rho_set_resh = tf.reshape(rho_set, (-1, 2 ** (2 * num_of_qubits))) rho_out_resh = tf.tensordot(choi_c, rho_set_resh, [[1], [1]]) rho_out_resh = tf.transpose(rho_out_resh) rho_out = tf.reshape(rho_out_resh, (-1, 2 ** num_of_qubits, 2 ** num_of_qubits)) p = tf.abs(tf.linalg.trace(M_set @ rho_out)) loss = -tf.reduce_mean(tf.math.log(p)) # negatibe log likelihood grad = tape.gradient(loss, choi) # gradient opt.apply_gradients(zip([grad], [choi])) # optimization step losses.append(loss) dist.append(0.5 * tf.reduce_sum(tf.abs(tf.linalg.eigvalsh(current_choi - choi_true)))) np.save('qubits=' + str(num_of_qubits) + '.npy', dist) # - # #### Plotting trace distance vs number of iter plt.plot(np.load('qubits=1.npy') / 2, 'b') 
plt.plot(np.load('qubits=2.npy') / 4, 'r') plt.legend([r'$one \ qubit$', r'$two \ qubits$']) plt.yscale('log') plt.ylabel(r'$Trace \ distance$') plt.xlabel(r'$iter$') plt.savefig('trace_distance_vs_iter.pdf') plt.plot(losses) plt.plot([true_llh / 600000] * 400)
examples/ChoiMatrix_channel_tomography.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2020년 2월 15일 토요일 # ### 백준 11652번: 카드 # ### 문제 : https://www.acmicpc.net/problem/11652 # ### 블로그 :https://somjang.tistory.com/entry/BaeKJoon-11652%EB%B2%88-%EC%B9%B4%EB%93%9C-Python # ### 첫번째 시도 # Dictionary를 활용 # + import sys N = int(input()) my_dict = {} for i in range(N): num = int(sys.stdin.readline()) if num not in my_dict.keys(): my_dict[num] = 1 else: my_dict[num] = my_dict[num] + 1 new_dict = dict([(value, key) for key, value in my_dict.items()]) print(new_dict[max(new_dict.keys())]) # - # ### 이런... # 결과는! - 틀렸습니다. # 왜 틀렸나하고 문제를 잘 살펴보니 # # 여러개의 숫자가 있을 경우 그 중에서 가장 작은 수를 출력해야하는 조건을 만족하지 못한 답이었습니다. # # 그리고 key와 value를 바꿔주는 과정에서도 같은 value값의 수가 key로 바뀌면서 # # 가장 작은 수를 추출할 수 없게 사라지는 경우도 있었습니다. # --- # ### 두번째 시도 # 이번에는 dictionary의 values() 함수를 활용해 풀어보기로 했습니다. # + import sys N = int(input()) my_dict = {} for i in range(N): num = int(sys.stdin.readline()) if num not in my_dict.keys(): my_dict[num] = 1 else: my_dict[num] = my_dict[num] + 1 max_num = max(list(my_dict.values())) answer = [] for key, value in my_dict.items(): if value == max_num: answer.append(key) print(min(answer)) # -
DAY 001 ~ 100/DAY009_[BaekJoon] 11625번 카드 (Python).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Enlib write_map bug # %load_ext autoreload # %autoreload 2 from enlib import enmap,wcs as mwcs import numpy as np import sys,os # We make a full-sky arcminute resolution geometry. I've only been able to reproduce this bug for res=1.0. res = 1.0 shape, wcs = enmap.fullsky_geometry(res=res*np.pi/180./60., proj="car") shape = (3,)+shape # We do a pix2sky that is needed by map2alm and make sure it gives a sensible result. # + ny, nx = shape[-2:] vy,vx = enmap.pix2sky(shape, wcs, [np.arange(ny),np.zeros(ny)]) hy,hx = enmap.pix2sky(shape, wcs, [np.zeros(nx),np.arange(nx)]) print(vy,vx,hy,hx) # - # It makes sense. We now save a map that has this geometry and load it back. # + root = os.environ['WORK']+"/" enmap.write_map(root+"temp.fits",enmap.zeros(shape,wcs,dtype=np.uint8)) lshape,lwcs = enmap.read_map_geometry(root+"temp.fits") print(shape,wcs) print(lshape,lwcs) print(mwcs.equal(wcs,lwcs)) # - # The shapes and wcs of the geometry we originally made and of the saved map seem to agree. So we proceed to do the same pix2sky operation on the loaded geometry. # + ny, nx = lshape[-2:] vy2,vx2 = enmap.pix2sky(lshape, lwcs, [np.arange(ny),np.zeros(ny)]) hy2,hx2 = enmap.pix2sky(lshape, lwcs, [np.zeros(nx),np.arange(nx)]) print(vy2,vx2,hy2,hx2) # - # The results are all nans. print(np.all(np.isclose(vy,vy2))) print(np.all(np.isclose(vx,vx2))) print(np.all(np.isclose(hy,hy2))) print(np.all(np.isclose(hx,hx2)))
scripts/enlib-write-map-bug.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="ewZjCbsyngyG" colab_type="code" outputId="cfd45263-d319-4363-b954-201373fb5114" colab={"base_uri": "https://localhost:8080/", "height": 51} # Install the PyDrive wrapper & import libraries # ! pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # + id="LCPitPYlnkgN" colab_type="code" colab={} download = drive.CreateFile({'id': '1_WqUew2CdIfAY2oPh7kOZqgtXDtLa6CN'}) download.GetContentFile('train_HNzkrPW.zip') # !unzip train_HNzkrPW.zip # + id="YYiD1Z_rfyHA" colab_type="code" colab={} #https://drive.google.com/open?id=1xCoxZFAkskZ2TLXIn_VkyTRuHGltFIOZ download1 = drive.CreateFile({'id': '1xCoxZFAkskZ2TLXIn_VkyTRuHGltFIOZ'}) download1.GetContentFile('test_CF.csv') # + id="CpsHjybEpUQe" colab_type="code" outputId="51e6f1a0-9611-4619-e757-2b1f4a416f49" colab={"base_uri": "https://localhost:8080/", "height": 51} # !ls # + id="LB_r1vC2u7WG" colab_type="code" colab={} import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import os import cv2 import random # + id="kwGUUnmSu_OK" colab_type="code" colab={} bbox_train_df = pd.read_csv('bbox_train.csv') # + id="wavH0vhpvF8k" colab_type="code" outputId="e33e7775-b9fe-46cb-d16d-ef80974fb6ec" colab={"base_uri": "https://localhost:8080/", "height": 80} bbox_train_df.head(1) # + id="DQn5_FS7nFYJ" colab_type="code" outputId="c539dcdc-2f45-4125-a6e0-ed376e34a375" colab={"base_uri": "https://localhost:8080/", "height": 80} test_df = pd.read_csv('test_CF.csv') test_df.head(1) # + id="WSpmXhPznFsO" colab_type="code" colab={} # + 
id="-YG5gP4HvIiZ" colab_type="code" outputId="eb48b6a8-c55b-4ddb-e07f-317a66a80f4f" colab={"base_uri": "https://localhost:8080/", "height": 80} train_df = pd.read_csv('train.csv') train_df.head(1) # + id="o1sVBuqNhsP5" colab_type="code" outputId="bee3e76d-912a-48f3-f7ef-9a4c44593ebf" colab={"base_uri": "https://localhost:8080/", "height": 71} train_df['HeadCount'].unique() # + id="M4Xu6oeVvnFq" colab_type="code" outputId="e9f9d27e-05fc-4cd4-e2d8-fefba48de336" colab={"base_uri": "https://localhost:8080/", "height": 424} # Number of Images per Head Count ax = plt.subplots(figsize = (15, 6)) ax = train_df['HeadCount'].value_counts().sort_index().plot(kind = 'bar', width = .8) ax.set_ylabel('No of HeadCounts') ax.set_xlabel("HeadCounts") ax.set_title("Face Counting HeadCounts") for p in ax.patches: ax.annotate(format(p.get_height()), (p.get_x()+0.1, p.get_height()+2.0)) plt.show() # + id="GyrXf-3cmc0A" colab_type="code" outputId="8961feda-9e60-4556-b6f7-978376cc64e4" colab={"base_uri": "https://localhost:8080/", "height": 51} # !ls # + id="gvmPLDfimlTO" colab_type="code" colab={} # + id="52_Fum3mmlWX" colab_type="code" colab={} n_images = os.listdir('image_data') # + id="JXR-ci1ChY6G" colab_type="code" outputId="b1987034-d0e1-4531-a962-ea6c1bf89338" colab={"base_uri": "https://localhost:8080/", "height": 450} # Sample Images sample_images = random.sample(n_images, 12) f, ax = plt.subplots(2, 6, figsize = (15, 7)) for i in range(0, 12): im = cv2.imread('image_data/'+sample_images[i]) ax[i//6, i%6].imshow(im) ax[i//6, i%6].axis('off') #print(i//6, i%6) f.suptitle('Face Count Challenge') plt.subplots_adjust(wspace = 0, hspace = 0) plt.show() # + id="ga9xVYcaoX3A" colab_type="code" colab={} # + id="tplCpcxasepq" colab_type="code" colab={} # + id="2wkit1tGses0" colab_type="code" colab={} # + [markdown] id="yPaAYIJksfbF" colab_type="text" # **Model** # + id="PWqsX4swsgMO" colab_type="code" colab={} # importing Libraries
Face Counting Challenge/Face_Counting_Challenge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # En este cuaderno se implementan algunas funciones y algunos segmentos de código que pueden ser útiles para el desarrollo del [Taller 1](https://github.com/andresgm/Herramientas-Computacionales/tree/master/02_taller01) del curso. # # Comienzo importando dos librerías que no habíamos usado hasta ahora en el curso. # # La librería os nos permite interactuar con algunas funcionalidades del sistema operativo. La vamos a utilizar para construir rutas a archivos que sean independientes del sistema operativo utilizado. # # La librería pandas es muy utilizada para análisis de datos. En primera instancia implementa una estructura de datos llamada 'dataframe' similar a una hoja de calculo. Adicionalmente, implementa numerosas funciones para realizar calculos estadísticos sobre estas estructuras. # + outputHidden=false inputHidden=false import os import pandas as pd # - # Tradicional comando para facilitar el cálculo interactivo. # + outputHidden=false inputHidden=false # %pylab inline # - # En la siguiente celda muestro cómo podemos leer los datos desde archivos .csv a pandas dataframes. # # Acá se puede apreciar el uso de *os.path.join* para la creación de las rutas de los archivos. # # Noten la configuración de la función pd.read_csv. El parámetro sep=';' determina que las variables están separadas por punto y coma y no por coma. El encoding='latin-1' permite leer correctamente caracteres como la 'ñ'. 
# + outputHidden=false inputHidden=false
# OS-independent paths to the hourly irradiance series for one city/source.
base_dir = os.path.join( '.','data')
output_dir = os.path.join( '.','output')
ciudad = 'Valledupar'
fuente = 'IDEAM'
serie_csv = os.path.join(base_dir,ciudad,fuente+'.csv')
# Fields are ';'-separated; latin-1 decodes characters such as the Spanish 'ñ'.
serie_pd = pd.read_csv(serie_csv, sep = ';', encoding='latin-1')

# + outputHidden=false inputHidden=false
# FIX: the original evaluated the bound method `serie_pd.head` without calling
# it, so nothing was actually computed/displayed; call it.
serie_pd.head()
# -

# The next cell implements two helper functions for the ENFICC computation as
# defined in [CREG Resolution 071 of 2006](http://apolo.creg.gov.co/Publicac.nsf/1c09d18d2d5ffb5b05256eee00709c02/4f8f33924247ce4a0525785a007a6d74?OpenDocument).
#
# `ghi_mensual` computes the total energy in $kWh/m^2$ for every month in the
# series. From that, `ghi_dia` computes the daily average of each month.

# + outputHidden=false inputHidden=false
def ghi_mensual(serie_horaria):
    """Return a DataFrame with one row per (year, month): last day observed
    (DIAS) and total monthly energy GHImes in kWh/m^2.

    `serie_horaria` must contain the columns 'AÑO', 'MES', 'DIA' and, as its
    last column, the hourly energy in Wh/m^2 (hence the /1000 below).
    """
    agnos = sorted(serie_horaria['AÑO'].unique())
    # The energy column is by convention the last column of the frame.
    nombre_energia = list(serie_horaria)[-1]
    filas = []
    for agno in agnos:
        for mes in range(1,13):
            seleccion = (serie_horaria["AÑO"] == agno) & (serie_horaria["MES"] == mes)
            dias = sorted(serie_horaria[seleccion]["DIA"].unique())
            ghi_mes = serie_horaria.loc[seleccion, nombre_energia].sum()/1000
            filas.append({"AÑO":agno, "MES":mes, "DIAS":dias[-1], "GHImes":ghi_mes})
    # FIX: DataFrame.append (used row-by-row in the original) is quadratic and
    # was removed in pandas 2.0; accumulate dicts and build the frame once.
    return pd.DataFrame(filas, columns=["AÑO", "MES", "DIAS", "GHImes"])

def ghi_dia(serie_ghi_mensual):
    """Return a DataFrame with the daily-average energy (GHIdiario, kWh/m^2)
    for every (year, month) row produced by `ghi_mensual`."""
    agnos = sorted(serie_ghi_mensual['AÑO'].unique())
    filas = []
    for agno in agnos:
        for mes in range(1,13):
            seleccion = (serie_ghi_mensual["AÑO"] == agno) & (serie_ghi_mensual["MES"] == mes)
            dias = serie_ghi_mensual.loc[seleccion, "DIAS"].item()
            ghi_diario = serie_ghi_mensual.loc[seleccion, "GHImes"].item()/dias
            filas.append({"AÑO":agno, "MES":mes, "GHIdiario":ghi_diario})
    return pd.DataFrame(filas, columns=["AÑO", "MES", "GHIdiario"])
# -

# The next cell applies both functions and sorts the monthly averages.

# + outputHidden=false inputHidden=false
ghi_mensual_loc = ghi_mensual(serie_pd)
ghi_diario_loc = ghi_dia(ghi_mensual_loc)
datos_IDEAM = sorted(ghi_diario_loc["GHIdiario"])
# -

# A simple plot of the monthly-average energy distribution, normalised by its
# smallest value.

# + outputHidden=false inputHidden=false
pylab.xlabel("Datos")
pylab.ylabel("Energía/Energía Base IDEAM")
numdatos = range(len(datos_IDEAM))
# FIX: `datos_IDEAM` is a plain Python list and list/float raises TypeError;
# convert to an array (provided by %pylab) before dividing element-wise.
pylab.plot(numdatos, array(datos_IDEAM)/datos_IDEAM[0], '-k', label='IDEAM')
pylab.legend(loc='upper left')
pylab.savefig(os.path.join(output_dir,'valledupar_IDEAM.png'), dpi=600)
# -

# Finally, the percentile corresponding to the desired ENFICC value
# (q=0.01 gives the value exceeded 99% of the time).

# + outputHidden=false inputHidden=false
percentil = 0.01
enficc = ghi_diario_loc.quantile(q=percentil, numeric_only=True)["GHIdiario"]
print('La ENFICC 99% es: ', enficc)

# + outputHidden=false inputHidden=false
ghi_mensual_loc

# + outputHidden=false inputHidden=false
02_taller01/enficc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ex 5 - Datacube for ML # # This notebook briefly describes how to use the `Datacube` class for ML projects. # We will use small and non-representive datasets for the example. The aim is for you to be able to run the code and understand the core concept of integrating the datacube into your ML project. # # In this example we will cover two main ML applications using the <a href='https://pytorch.org/'>Torch</a> framework: # 1. <b>Segmentation:</b> performs pixel-wise labeling with a set of object categories (for example, people, trees, sky, cars) for all image pixels. # # 2. <b>Object classification:</b> refers to a collection of related tasks for identifying objects in digital photographs. # # This example is dependent on installing `pytorch`, `xbatcher`. # **NOTE:** In order to execute this notebook successfully, one might have to instal extra dependencies for ML packages. Please install `ml_requirements` as mentioned in `setup.py`using `pip install -e .[ml] ` # + # Or uncomment the below line to install them. # #! 
# pip install -e ../../.[ml]
# -

from pathlib import Path
import os
import icecube
from icecube.bin.datacube import Datacube
from icecube.bin.generate_cube import IceyeProcessGenerateCube
import xbatcher
import torch
from torch.utils.data import Dataset
from torchvision import datasets
import xarray
import numpy as np
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# NOTE(review): numpy and matplotlib.pyplot are imported twice — harmless but
# redundant.
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.autograd import Variable

# # Segmentation Example

# +
# Read the datacube inputs
# Test resources shipped with the icecube package: a GRD raster stack, the
# cube JSON configuration, and the segmentation-mask labels file.
resource_dir = os.path.join(str(Path(icecube.__file__).parent.parent), "tests/resources")
grd_raster_dir = os.path.join(resource_dir, "grd_stack")
cube_config_fpath = os.path.join(resource_dir, "json_config/config_use_case5.json")

# This file is created when you run the tests - please run `inv test` first.
masks_labels_fpath = os.path.join(resource_dir, "labels/dummy_mask_labels.json")

# Sample of the labels-file schema:
#[
#    {
#        "product_file": "ICEYE_GRD_SLED_54549_20210427T215124_hollow_10x10pixels_fake_0.tif",
#        "labels": {
#            "segmentation": "/home/adupeyrat/Documents/code/icecube/tests/resources/masks/ICEYE_GRD_SLED_54549_20210427T215124_hollow_10x10pixels_fake_0.png"
#        }
#    },
# -

# Build the datacube from the raster stack + config + labels.
dc = IceyeProcessGenerateCube.create_cube(grd_raster_dir, cube_config_fpath, masks_labels_fpath)

dc.xrdataset

# The dataset contains labels and data as `np.ndarray`. For the segmentation
# model we will create an IterableDataset that will be used to slice the
# datacube in 'Azimuth' and 'Range' direction. That way we can directly map it
# to a deep learning model.
class Iceye_GRD_Loader(torch.utils.data.IterableDataset):
    """Iterable dataset that slices a concatenated ICEYE xarray dataset into
    fixed-size patches (via xbatcher) and yields (intensity, labels) pairs of
    torch tensors.
    """

    def __init__(self, list_xrdataset):
        # FIX: the original called `super(Iceye_GRD_Loader).__init__()`, which
        # creates an *unbound* super object and therefore never runs
        # IterableDataset.__init__; use the zero-argument form instead.
        super().__init__()
        # Change the following for your application - for the purpose of the
        # demo we concatenate the same dataset along a new dimension; in a real
        # project you would work with multiple stacks.  Patches of size
        # (6, 4, 4) are produced by slicing the xarray dataset with xbatcher.
        concated_dataset = xarray.concat(list_xrdataset, "stack")
        self.bgen = xbatcher.BatchGenerator(concated_dataset, {'stack': 1, 'Band': 6, 'Azimuth': 4, 'Range': 4})

    def __iter__(self):
        for batch in self.bgen:
            # uint16 is not an accepted tensor dtype, so cast to int32 first.
            yield torch.from_numpy(np.squeeze(batch["Intensity"].values.astype("int32"), axis=None)), torch.from_numpy(np.squeeze(batch["Labels"].values, axis=None)).float()


# For the demo, the "training set" is ten copies of the same cube.
training_data = Iceye_GRD_Loader([dc.xrdataset for i in range(10)])

# We select a batch size of 2.
train_dataloader = DataLoader(training_data, batch_size=2)

# Display image and label.
# Pull one batch from the loader and show its shapes.
train_features, train_labels = next(iter(train_dataloader))
print(f"Feature batch shape: {train_features.size()}")
print(f"Labels batch shape: {train_labels.size()}")
# Plot the 6 bands of the first sample's features side by side.
w = 20
h = 20
fig = plt.figure(figsize=(w, 8))
columns = 5
rows = 1
for i in range(0, columns*rows+1):
    img = train_features[0].squeeze()[i]
    fig.add_subplot(rows, columns+1, i+1)
    plt.imshow(img)
plt.show()

# +
import numpy as np
import matplotlib.pyplot as plt

# Same grid, but for the corresponding label masks.
w = 20
h = 20
fig = plt.figure(figsize=(w, 8))
columns = 5
rows = 1
for i in range(0, columns*rows+1):
    img = train_labels[0].squeeze()[i]
    fig.add_subplot(rows, columns+1, i+1)
    plt.imshow(img)
plt.show()

# +
import torch
from torch.nn import Module
from torch.nn import Sequential
from torch.nn import Conv2d, ReLU

# A minimal torch model: a single 1x1 2D convolution over the 6 input
# channels, followed by ReLU. (The original comment said "tensorflow"; this
# is PyTorch.)
class MiniModel(Module):
    def __init__(self,):
        super(MiniModel, self).__init__()
        self.block1 = Sequential(
            Conv2d(6, 6, kernel_size=1, padding=0),
            ReLU()
        )
    def forward(self, x):
        return self.block1(x)
# -

# Use gpu for training if available else use cpu
device = 'cpu'

# Here is the loss and optimizer definition
model = MiniModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# Standard training loop: forward, MSE loss, backward, optimizer step.
epochs = 50
for epoch in range(epochs):
    for i, (images, masks) in enumerate(train_dataloader, 1):
        images = images.to(device)
        # NOTE(review): masks are cast to LongTensor here and then back to
        # float for the MSE loss below — the intermediate cast looks redundant.
        masks = masks.type(torch.LongTensor)
        masks = masks.to(device)
        # Forward pass
        outputs = model(images.float())
        loss = criterion(outputs.float(), masks.float()).float()
        # Backward and optimize
        optimizer.zero_grad()
        loss.sum().backward()
        optimizer.step()
        if (i) % 20 == 0:
            print (f"Epoch [{epoch + 1}/{epochs}], Loss: {loss.sum().item():4f}")

# +
# Visual comparison: model prediction vs. target masks for the last batch.
print("Current model prediction")
w = 20
h = 20
fig = plt.figure(figsize=(w, 8))
columns = 5
rows = 1
for i in range(0, columns*rows+1):
    output = outputs[0].squeeze()[i].detach().numpy()
    fig.add_subplot(rows, columns+1, i+1)
    plt.imshow(output)
plt.show()

print("target prediction")
w = 20
h = 20
fig = plt.figure(figsize=(w, 8))
columns = 5
rows = 1
for i in range(0, columns*rows+1):
    mask = masks[0].squeeze()[i].detach().numpy()
    fig.add_subplot(rows, columns+1, i+1)
    plt.imshow(mask)
plt.show()
# -

# **hyvää työtä!**
docs/examples/Ex5_Datacube_for_ML.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Melanoma analysis with fractal neural networks # This notebook shows how good is [Fractal neural network](#Fractal-neural-network) for [melanoma](#Melanoma) analysis. import datetime import numpy as np import tensorflow as tf import tensorflow_hub as hub import tensorflow_addons as tfa import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc # Check if a GPU is available. tf.config.list_physical_devices('GPU') # # Melanoma # __Melanoma__, also redundantly known as __malignant melanoma__, is a type of skin cancer that develops from the pigment-producing cells known as melanocytes. Melanomas typically occur in the skin, but may rarely occur in the mouth, intestines, or eye (uveal melanoma). In women, they most commonly occur on the legs, while in men, they most commonly occur on the back. About 25% of melanomas develop from moles. Changes in a mole that can indicate melanoma include an increase in size, irregular edges, change in color, itchiness, or skin breakdown. # ![melanoma image](../assets/melanoma.jpg) # <div style="text-align: center; font-weight: bold">Pic.1. A melanoma of approximately 2.5 cm (1 in) by 1.5 cm (0.6 in)</div> # The primary cause of melanoma is ultraviolet light (UV) exposure in those with low levels of the skin pigment melanin. The UV light may be from the sun or other sources, such as tanning devices. Those with many moles, a history of affected family members, and poor immune function are at greater risk. A number of rare genetic conditions, such as xeroderma pigmentosum, also increase the risk. Diagnosis is by biopsy and analysis of any skin lesion that has signs of being potentially cancerous. # Melanoma is the most dangerous type of skin cancer. 
# Globally, in 2012, it newly occurred in 232,000 people. In 2015, 3.1 million people had active disease, which resulted in 59,800 deaths. Australia and New Zealand have the highest rates of melanoma in the world. High rates also occur in Northern Europe and North America, while it is less common in Asia, Africa, and Latin America. In the United States, melanoma occurs about 1.6 times more often in men than women. Melanoma has become more common since the 1960s in areas mostly populated by people of European descent.

# # Fractal neural network

# We propose an ensemble model based on handcrafted fractal features and deep learning that consists of combining the classification of two CNNs by applying the sum rule. We apply feature extraction to obtain 300 fractal features from different dermoscopy datasets. These features are reshaped into a 10 × 10 × 3 matrix to compose an artificial image that is given as input to the first CNN. The second CNN model receives as input the corresponding original image.

# ![CNN image](../assets/fnn.png)

# <div style="text-align: center; font-weight: bold">Pic.2. Overview of the proposed FNN model.</div>

# If you want to learn more about fractal neural networks, read [here](https://www.sciencedirect.com/science/article/abs/pii/S0957417420308563).

# ## Developing the layer that divides images into patches.

# According to the article:

# > One of the approaches available in the literature for multiscale
# analysis is the gliding-box algorithm (Ivanovici & Richard, 2011). The
# main advantage of this approach is that it can be applied on datasets
# containing images with different resolutions since the output features
# are given in relation to the scale instead of being absolute values.
# This algorithm consists in placing a box 𝛽𝑖
# sized 𝐿 × 𝐿 on the left
# superior corner of the image, wherein 𝐿 is given in pixels. This box
# glides through the image, one column and then one row at a time.
After # reaching the end of the image, the box is repositioned at the starting # point and the value of 𝐿 is increased by 2. # The gliding-box method will not be used since it consumes too much RAM. We'll employ a box-counting approach, which basically means we'll partition the images into non-overlapping chunks. class BoxCountingPatch(tf.keras.layers.Layer): def __init__(self, box_size): super(BoxCountingPatch, self).__init__() self.box_size = box_size def call(self, inputs): patched_inputs = tf.image.extract_patches( inputs, sizes=(1, self.box_size, self.box_size, 1), strides=(1, self.box_size, self.box_size, 1), rates=(1, 1, 1, 1), padding='SAME' ) _, rows, cols, _ = patched_inputs.shape patched_inputs = tf.reshape(patched_inputs, shape=(-1, rows * cols, self.box_size, self.box_size, 3)) return patched_inputs # ## Developing the layer that creates an array of binary values from image patches using the Chebyshev colour distance function applied to the patch centre and each pixel. # According to the article: # > For each time the box 𝛽<sub>𝑖</sub> is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector 𝑓<sub>𝑐</sub> = 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub>, 𝑏<sub>𝑐</sub>, where 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub> and 𝑏<sub>𝑐</sub> correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector 𝑓<sub>𝑖</sub> = 𝑟<sub>𝑖</sub>, 𝑔<sub>𝑖</sub>, 𝑏<sub>𝑖</sub> and compared to the centre pixel by calculating a colour distance 𝛥. On the proposed approach, the Chebyshev (𝛥<sub>ℎ</sub>) ... # The following equation is used to compute the Chebyshev distance. # $$ # \Delta_{h} = max(|f_{i}(k_{i}) - f_{c}(k_{c})|), k \in r, g, b. 
# $$ class ChebyshevBinaryPatch(tf.keras.layers.Layer): def __init__(self): super(ChebyshevBinaryPatch, self).__init__() def call(self, inputs): def helper(_input_): _, number_of_patches, box_size, _, channels = _input_.shape _input_ = tf.reshape(_input_, shape=(-1, box_size, box_size, channels)) centers = tf.image.resize_with_crop_or_pad(_input_, 1, 1) binary = tf.math.subtract(_input_, centers) binary = tf.math.abs(binary) binary = tf.math.reduce_max(binary, axis=3) binary = tf.math.less_equal(binary, box_size) binary = tf.cast(binary, dtype=tf.int32) binary = tf.reshape(binary, shape=(-1, number_of_patches, box_size, box_size)) return binary return [helper(_input_) for _input_ in inputs] # ## Developing the layer that creates an array of binary values from image patches using the Euclidean colour distance function applied to the patch centre and each pixel. # According to the article: # > For each time the box 𝛽<sub>𝑖</sub> is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector 𝑓<sub>𝑐</sub> = 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub>, 𝑏<sub>𝑐</sub>, where 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub> and 𝑏<sub>𝑐</sub> correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector 𝑓<sub>𝑖</sub> = 𝑟<sub>𝑖</sub>, 𝑔<sub>𝑖</sub>, 𝑏<sub>𝑖</sub> and compared to the centre pixel by calculating a colour distance 𝛥. On the proposed approach, ... Euclidean (𝛥<sub>e</sub>) .. 
# $$
# \Delta_{e} = \sqrt{\sum_{k} (f_{i}(k_{i}) - f_{c}(k_{c}))^2}, k \in r, g, b
# $$

class EuclideanBinaryPatch(tf.keras.layers.Layer):
    # For each patch, labels a pixel 1 when its Euclidean colour distance to
    # the patch centre is <= the box size L, else 0.
    def __init__(self):
        super(EuclideanBinaryPatch, self).__init__()

    def call(self, inputs):
        # `inputs` is a list of patch tensors, one per box size; returns the
        # corresponding list of int32 binary tensors
        # shaped (batch, n_patches, box, box).
        def helper(_input_):
            _, number_of_patches, box_size, _, channels = _input_.shape
            _input_ = tf.reshape(_input_, shape=(-1, box_size, box_size, channels))
            # Centre pixel of each patch, obtained by cropping to 1x1.
            centers = tf.image.resize_with_crop_or_pad(_input_, 1, 1)
            binary = tf.math.subtract(_input_, centers)
            # FIX: the original squared `_input_` here instead of the
            # difference, silently discarding the subtraction above.  The
            # Euclidean distance must square (f_i - f_c) per channel.
            binary = tf.math.pow(binary, 2)
            binary = tf.math.reduce_sum(binary, axis=3)
            binary = tf.math.pow(binary, 0.5)
            # Threshold: distance <= L marks the pixel as similar (label 1).
            binary = tf.math.less_equal(binary, box_size)
            binary = tf.cast(binary, dtype=tf.int32)
            binary = tf.reshape(binary, shape=(-1, number_of_patches, box_size, box_size))
            return binary
        return [helper(_input_) for _input_ in inputs]

# ## Developing the layer that creates an array of binary values from image patches using the Manhattan colour distance function applied to the patch centre and each pixel.

# According to the article:

# > For each time the box 𝛽<sub>𝑖</sub> is moved, a multidimensional analysis of colour similarity is performed for every pixel inside it. This is done by assigning the centre pixel to a vector 𝑓<sub>𝑐</sub> = 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub>, 𝑏<sub>𝑐</sub>, where 𝑟<sub>𝑐</sub>, 𝑔<sub>𝑐</sub> and 𝑏<sub>𝑐</sub> correspond to the colour intensities for each of the RGB colour channels of given pixel. The other pixels in the box are assigned to a vector 𝑓<sub>𝑖</sub> = 𝑟<sub>𝑖</sub>, 𝑔<sub>𝑖</sub>, 𝑏<sub>𝑖</sub> and compared to the centre pixel by calculating a colour distance 𝛥. On the proposed approach, ... Manhattan (𝛥<sub>m</sub>) ..
# $$ # \Delta_{m} = \sum_{k} |f_{i}(k_{i}) - f_{c}(k_{c})|, k \in r, g, b # $$ class ManhattanBinaryPatch(tf.keras.layers.Layer): def __init__(self): super(ManhattanBinaryPatch, self).__init__() def call(self, inputs): def helper(_input_): _, number_of_patches, box_size, _, channels = _input_.shape _input_ = tf.reshape(_input_, shape=(-1, box_size, box_size, channels)) centers = tf.image.resize_with_crop_or_pad(_input_, 1, 1) binary = tf.math.subtract(_input_, centers) binary = tf.math.abs(binary) binary = tf.math.reduce_sum(binary, axis=3) binary = tf.math.less_equal(binary, box_size) binary = tf.cast(binary, dtype=tf.int32) binary = tf.reshape(binary, shape=(-1, number_of_patches, box_size, box_size)) return binary return [helper(_input_) for _input_ in inputs] # ## Developing the layer that calculates probability matrices # According to the article: # > After performing this conversion for every box of every given 𝐿 scale, a structure known as probability matrix is generated. Each element of the matrix corresponds to the probability 𝑃 that 𝑚 pixels on a scale 𝐿 are labelled as 1 on each box. ... 
# The matrix is normalized in a way that the sum of the elements in a column is equal to 1, as showed here:

# $$
# \sum_{m=1}^{L^2} P(m, L) = 1, \forall L
# $$

# +
class ProbabilityMatrix(tf.keras.layers.Layer):
    # Builds, for every colour distance and every box size, the normalised
    # histogram P(m, L) of "m pixels labelled 1 per box" over all boxes.
    def __init__(self):
        super(ProbabilityMatrix, self).__init__()

    def call(self, inputs):
        # `inputs`: list (per colour distance) of lists (per box size) of
        # binary patch tensors.  Returns the same nesting with probability
        # rows normalised to sum to 1.
        # NOTE(review): despite its name, `color_distance_inputs` accumulates
        # the *outputs*.
        color_distance_inputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                # Count of 1-labelled pixels in each patch.
                number_of_ones_for_every_patch = tf.map_fn(
                    lambda batch: tf.map_fn(
                        lambda patch: tf.math.reduce_sum(patch),
                        batch
                    ),
                    box_input
                )
                _, number_of_patches, box_size, _ = box_input.shape
                # Histogram over m = 1..L^2 per sample, then normalise twice:
                # first by the patch count, then so each row sums to 1.
                probabilities = tf.math.bincount(
                    number_of_ones_for_every_patch,
                    minlength=1,
                    maxlength=box_size ** 2,
                    axis=-1
                )
                probabilities = tf.math.divide(probabilities, number_of_patches)
                probabilities = tf.map_fn(
                    lambda x: x[0] / x[1],
                    elems=(probabilities, tf.math.reduce_sum(probabilities, axis=1)),
                    fn_output_signature=tf.float64
                )
                box_outputs.append(probabilities)
            color_distance_inputs.append(box_outputs)
        return color_distance_inputs
# -

# ## Developing the layer that calculates fractal dimensions

# According to the article:

# > FD is the most common technique to evaluate the fractal properties of an image. This is a measure for evaluating the irregularity and the complexity of a fractal.
# To obtain local FD features from the probability
# matrix, for each value of 𝐿, the FD denominated 𝐷(𝐿) is calculated according to

# $$
# D(L) = \sum_{m=1}^{L^2} \frac{P(m, L)}{m}
# $$

class FractalDimension(tf.keras.layers.Layer):
    # Computes the local fractal dimension D(L) from each probability matrix.
    def __init__(self):
        super(FractalDimension, self).__init__()

    def call(self, inputs):
        # `inputs`: list (per colour distance) of lists (per box size) of
        # probability matrices; same nesting is returned with one D value per
        # sample.
        color_distance_outputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                # NOTE(review): len(box_input) is the size of the *first*
                # tensor axis, and map_fn zips along that axis — so the
                # division appears to run over the batch axis rather than
                # over m as in D(L)=sum_m P(m,L)/m.  Verify against the paper.
                probability_numbers = tf.range(1, len(box_input) + 1, dtype=tf.float32)
                fractal_dimension = tf.map_fn(
                    lambda x: x[0] / x[1],
                    elems=(box_input, probability_numbers),
                    fn_output_signature=tf.float32
                )
                fractal_dimension = tf.math.reduce_sum(fractal_dimension, axis=1)
                box_outputs.append(fractal_dimension)
            color_distance_outputs.append(box_outputs)
        return color_distance_outputs

# ## Developing the layer that calculates lacunarity

# According to the article:

# > LAC is a measure complementary to FD and allows to evaluate how the space of a fractal is filled (Ivanovici & Richard, 2009).
# From the probability matrix, first and second-order moments are calculated with

# $$
# \mu(L) = \sum_{m=1}^{L^2} mP(m, L)
# $$

# $$
# \mu^2(L) = \sum_{m=1}^{L^2} m^{2}P(m, L)
# $$

# > The LAC value for a scale 𝐿 is given by 𝛬(𝐿), which is obtained according to

# $$
# \Lambda(L) = \frac{\mu^{2}(L) - (\mu(L))^{2}}{(\mu(L))^{2}}
# $$

class Lacunarity(tf.keras.layers.Layer):
    # Computes lacunarity Λ(L) = (μ²(L) − μ(L)²) / μ(L)² from each
    # probability matrix.
    def __init__(self):
        super(Lacunarity, self).__init__()

    def call(self, inputs):
        color_distance_outputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                # NOTE(review): as in FractalDimension, map_fn pairs each
                # batch row with index+1 along the first axis; confirm this
                # matches the per-m weighting of the formulas above.
                probability_numbers = tf.range(1, len(box_input) + 1, dtype=tf.float32)
                # (μ(L))²: first moment, then squared.
                mu_first_2 = tf.map_fn(
                    lambda x: x[0] * x[1],
                    elems=(box_input, probability_numbers),
                    fn_output_signature=tf.float32
                )
                mu_first_2 = tf.math.reduce_sum(mu_first_2, axis=1)
                mu_first_2 = tf.math.pow(mu_first_2, 2)
                # μ²(L): second moment (weights m²).
                mu_second = tf.math.pow(probability_numbers, 2)
                mu_second = tf.map_fn(
                    lambda x: x[0] * x[1],
                    elems=(box_input, mu_second),
                    fn_output_signature=tf.float32
                )
                mu_second = tf.math.reduce_sum(mu_second, axis=1)
                lacunarity = tf.math.divide(
                    tf.math.subtract(mu_second, mu_first_2),
                    mu_first_2
                )
                box_outputs.append(lacunarity)
            color_distance_outputs.append(box_outputs)
        return color_distance_outputs

# ## Developing the layer that calculates percolation C - the average number of clusters per box on a scale L

# According to the article:

# > Let 𝑐<sub>𝑖</sub> be the number of clusters on a box 𝛽<sub>𝑖</sub>, the feature 𝐶(𝐿) that represents the average number of clusters per box on a scale 𝐿 is given by

# $$
# C(L) = \frac{\sum_{i=1}^{T(L)} c_{i}}{T(L)}
# $$

class PercolationC(tf.keras.layers.Layer):
    # C(L): average number of connected clusters per box, using
    # tfa.image.connected_components (labels are 1..c, so the max label is
    # the cluster count).
    def __init__(self):
        super(PercolationC, self).__init__()

    def call(self, inputs):
        color_distance_outputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                percolation_c = tf.math.reduce_mean(
                    tf.map_fn(
                        lambda batch: tf.map_fn(
                            lambda patch: tf.math.reduce_max(tfa.image.connected_components(patch)),
                            batch
                        ),
                        box_input
                    ),
                    axis=1
                )
                percolation_c = tf.cast(percolation_c, dtype=tf.float32)
                box_outputs.append(percolation_c)
            color_distance_outputs.append(box_outputs)
        return color_distance_outputs

# ## Developing the layer that calculates percolation M - the average coverage area of the largest cluster on a scale L

# According to the article:

# >Another feature that can be obtained is the average coverage area of the largest cluster in a box and is given by 𝑀(𝐿). Let 𝑚<sub>𝑖</sub> be the size in pixels of the largest cluster of the box 𝛽<sub>𝑖</sub>. The feature 𝑀(𝐿) is given according to

# $$
# M(L) = \frac{\sum_{i=1}^{T(L)} \frac{m_{i}}{L^2}}{T(L)}
# $$

class PercolationM(tf.keras.layers.Layer):
    # M(L): average size of the largest connected cluster per box.
    def __init__(self):
        super(PercolationM, self).__init__()

    def call(self, inputs):
        color_distance_outputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                percolation_m = tf.math.reduce_mean(
                    tf.map_fn(
                        lambda batch: tf.map_fn(
                            lambda patch: self.most_common(tf.reshape(tfa.image.connected_components(patch), shape=(-1,))),
                            batch
                        ),
                        box_input
                    ),
                    axis=1
                )
                percolation_m = tf.cast(percolation_m, dtype=tf.float32)
                box_outputs.append(percolation_m)
            color_distance_outputs.append(box_outputs)
        return color_distance_outputs

    def most_common(self, array):
        # Size of the most frequent label in the flattened component map.
        # NOTE(review): this includes label 0 (pixels outside any cluster),
        # so when background dominates it measures background, not the
        # largest cluster; and the result is not divided by L^2 as in the
        # quoted M(L) formula — verify against the paper.
        _, _, counts = tf.unique_with_counts(array)
        return tf.math.reduce_max(counts)

# ## Developing the layer that calculates percolation Q - the average occurrence of percolation on a scale L

# According to the article:

# > We can also verify whether a box 𝛽<sub>𝑖</sub> is percolating. This can be achieved due to a property that states a percolation threshold for different types of structures. In squared matrices (digital images), this threshold has the value of 𝑝 = 0.59275, which means that if the ratio between pixels labelled as 1 and pixels labelled as 0 is greater or equal than 𝑝, the matrix is considered as percolating.
# Let 𝛺<sub>𝑖</sub> be the number of pixels labelled as 1 in a box 𝛽<sub>𝑖</sub> with size 𝐿 × 𝐿, we determine whether such box is percolating according to

# $$
# q_{i} =
# \begin{cases}
# 1, & \frac{\Omega_{i}}{L^2} \ge 0.59275 \\
# 0, & \frac{\Omega_{i}}{L^2} < 0.59275
# \end{cases}
# $$

# > This results in a binary value for 𝑞<sub>𝑖</sub>, wherein 1 indicates that thebox is percolating. The feature 𝑄(𝐿) regards the average occurrence of percolation on a scale 𝐿 and can be obtained by

# $$
# Q(L) = \frac{\sum_{i=1}^{T(L)} q_{i}}{T(L)}
# $$

class PercolationQ(tf.keras.layers.Layer):
    # Q(L): fraction of boxes whose share of 1-labelled pixels reaches the
    # percolation threshold p = 0.59275 (square-matrix threshold).
    def __init__(self, threshold=0.59275):
        super(PercolationQ, self).__init__()
        # Percolation threshold p; a box percolates when ones/L^2 >= p.
        self.threshold = threshold

    def call(self, inputs):
        color_distance_outputs = []
        for color_distance_input in inputs:
            box_outputs = []
            for box_input in color_distance_input:
                # Omega_i: count of 1-labelled pixels per box.
                number_of_ones_for_every_patch = tf.map_fn(
                    lambda batch: tf.map_fn(
                        lambda patch: tf.math.reduce_sum(patch),
                        batch
                    ),
                    box_input
                )
                _, number_of_patches, box_size, _ = box_input.shape
                # q_i = [Omega_i / L^2 >= p], then averaged over all boxes.
                percolation_q = tf.math.divide(number_of_ones_for_every_patch, box_size ** 2)
                percolation_q = tf.math.greater_equal(percolation_q, self.threshold)
                percolation_q = tf.cast(percolation_q, dtype=tf.float32)
                percolation_q = tf.math.reduce_mean(percolation_q, axis=1)
                box_outputs.append(percolation_q)
            color_distance_outputs.append(box_outputs)
        return color_distance_outputs

    def most_common(self, array):
        # NOTE(review): unused here — apparently copy-pasted from
        # PercolationM; nothing in this layer calls it.
        _, _, counts = tf.unique_with_counts(array)
        return tf.math.reduce_max(counts)

# ## Developing the layer that assembles fractal features into images

# According to the article:

# > To serve as input for the incoming CNN classification, the feature vectors generated on the previous layers of the network must be converted into feature matrices. To do so, the 100 features obtained by each distance 𝛥 are rearranged as a 10 × 10 matrix. The matrices generated by 𝛥<sub>ℎ</sub>, 𝛥<sub>𝑒</sub> and 𝛥<sub>𝑚</sub> correspond to the R, G and B colour channels, respectively. ...
# Since each of the functions 𝐶(𝐿), 𝑄(𝐿), 𝑀(𝐿), 𝛬(𝐿) and 𝐷(𝐿), obtained from a specific 𝛥, generate 20 features, each function is fit exactly into 2 columns of the matrix.
#
# >Since each of the functions 𝐶(𝐿), 𝑄(𝐿), 𝑀(𝐿), 𝛬(𝐿) and 𝐷(𝐿), obtained from a specific 𝛥, generate 20 features, each function is fit exactly into 2 columns of the matrix.

class AssembleFractalImage(tf.keras.layers.Layer):
    # Rearranges the nested [feature][distance][scale] outputs into a batch
    # of 10x10x3 "fractal images" (one channel per colour distance).
    def __init__(self):
        super(AssembleFractalImage, self).__init__()

    def call(self, inputs):
        output = tf.convert_to_tensor(inputs)
        # Move batch first, keep the 3 colour distances as the last axis.
        output = tf.transpose(output, perm=(3, 1, 0, 2))
        output = tf.reshape(output, shape=(-1, 10, 10, 3))
        return output

# ## Assembling the layers into fractal neural network

class FractalNeuralNetwork(tf.keras.Model):
    # Full FNN: handcrafted fractal features -> artificial 10x10x3 image ->
    # MobileNetV2, combined (sum rule) with MobileNetV2 features of the
    # original image, then a softmax classifier.
    def __init__(self, class_number, verbose, ensure_input_shape):
        super(FractalNeuralNetwork, self).__init__()
        # verbose: print a progress line after every stage of call().
        self.verbose = verbose
        # Static shape asserted on the inputs, e.g. (None, 224, 224, 3).
        self.ensure_input_shape = ensure_input_shape
        # One patching layer per odd box size L = 3, 5, ..., 41 (20 scales).
        self.box_counting_patches = [BoxCountingPatch(box_size) for box_size in range(3, 41 + 1, 2)]
        self.chebyshev = ChebyshevBinaryPatch()
        self.euclidean = EuclideanBinaryPatch()
        self.manhattan = ManhattanBinaryPatch()
        self.percolation_c = PercolationC()
        self.percolation_m = PercolationM()
        self.percolation_q = PercolationQ()
        self.probability = ProbabilityMatrix()
        self.fractal_dimension = FractalDimension()
        self.lacunarity = Lacunarity()
        self.assemble = AssembleFractalImage()
        # The 10x10 fractal image is upscaled to MobileNetV2's input size.
        self.resize = tf.keras.layers.Resizing(width=224, height=224)
        self.rescale_original = tf.keras.layers.Rescaling(scale=1./255)
        # The fractal image is normalised by its own maximum instead.
        self.rescale_fractal = tf.keras.layers.Lambda(lambda x: tf.math.divide(x, tf.math.reduce_max(x)))
        # Frozen MobileNetV2 feature extractor shared by both branches.
        self.mobilenet_v2 = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4", output_shape=[1280], trainable=False)
        # Sum rule: the two 1280-d feature vectors are added element-wise.
        self.combine = tf.keras.layers.Add()
        self.score = tf.keras.layers.Dense(class_number, activation='softmax')

    def call(self, inputs):
        inputs = tf.ensure_shape(inputs, self.ensure_input_shape)
        # Patch the image at every scale, then binarise per colour distance.
        box_counting_patches = [box_counting_patch(inputs) for box_counting_patch in self.box_counting_patches]
        self.log(message='patchify')
        chebyshev = self.chebyshev(inputs=box_counting_patches)
        self.log(message='chebyshev')
        euclidean = self.euclidean(inputs=box_counting_patches)
        self.log(message='euclidean')
        manhattan = self.manhattan(inputs=box_counting_patches)
        self.log(message='manhattan')
        # Percolation features come straight from the binary patches ...
        percolation_c = self.percolation_c(inputs=[chebyshev, euclidean, manhattan])
        self.log(message='percolation_c')
        percolation_m = self.percolation_m(inputs=[chebyshev, euclidean, manhattan])
        self.log(message='percolation_m')
        percolation_q = self.percolation_q(inputs=[chebyshev, euclidean, manhattan])
        self.log(message='percolation_q')
        # ... while FD and LAC come from the probability matrices.
        probability = self.probability(inputs=[chebyshev, euclidean, manhattan])
        self.log(message='probability')
        fractal_dimension = self.fractal_dimension(inputs=probability)
        self.log(message='fractal_dimension')
        lacunarity = self.lacunarity(inputs=probability)
        self.log(message='lacunarity')
        # Pack the five feature families into the artificial fractal image.
        fractal_output = self.assemble(
            inputs=[
                fractal_dimension,
                lacunarity,
                percolation_c,
                percolation_m,
                percolation_q
            ]
        )
        self.log(message='fractal_output assemble')
        fractal_output = self.resize(fractal_output)
        self.log(message='fractal_output resize')
        fractal_output = self.rescale_fractal(fractal_output)
        self.log(message='fractal_output rescale')
        fractal_output = self.mobilenet_v2(fractal_output)
        self.log(message='fractal_output mobilenet_v2')
        # Second branch: the original image through the same extractor.
        original_output = self.rescale_original(inputs)
        self.log(message='original_output rescale')
        original_output = self.mobilenet_v2(original_output)
        self.log(message='original_output mobilenet_v2')
        combined_output = self.combine([fractal_output, original_output])
        self.log(message='combined_output combine')
        output = self.score(combined_output)
        self.log(message='output score')
        self.log(message='_' * 100)
        return output

    def log(self, message):
        # Progress tracing, enabled by the `verbose` constructor flag.
        if self.verbose:
            print(f'\n{message}\n')

# # Data loading

# +
# Augmented generator with an 80/20 train/validation split.
generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=180,
    horizontal_flip=True,
    vertical_flip=True,
    brightness_range=(0.2, 1.5),
    validation_split=0.2,
)

training_set = generator.flow_from_directory(
    '/small-data',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    subset='training'
)

validation_set = generator.flow_from_directory(
    '/small-data',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    subset='validation'
)
# -

# Number of diagnosis classes, derived from the directory structure.
CLASS_NUMBER = len(training_set.class_indices)

# ### Data source

# As a data source, we use the ISIC Archive.

# The ISIC Archive is an open source platform with publicly available images of skin lesions under Creative Commons licenses. The images are associated with ground-truth diagnoses and other clinical metadata. Images can be queried using faceted search and downloaded individually or in batches. The initial focus of the archive has been on dermoscopy images of individual skin lesions, as these images are inherently standardized by the use of a specialized acquisition device and devoid of many of the privacy challenges associated with clinical images. To date, the images have been provided by specialized melanoma centers from around the world. The archive is designed to accept contributions from new sources under the Terms of Use and welcomes new contributors. There are ongoing efforts to supplement the dermoscopy images in the archive with close-up clinical images and a broader representation of skin types. The images in the Archive are used to support educational efforts through linkage with Dermoscopedia and are used for Grand Challenges and Live Challenges to engage the computer science community for the development of diagnostic AI.

# For more information, go to [ISIC Archive web site](https://www.isic-archive.com/)

# # Model training

# ## Preparing TensorFlow callbacks

# For our convenience, we create a few TensorFlow callbacks.

# ### The TensorBoard callback

# We want to see how the training is going. We add the callback, which will log the metrics to TensorBoard.
# Timestamp the run directory: the original used strftime('fractalnet'), which
# contains no format directives and therefore always produced the same path,
# making successive runs overwrite each other in TensorBoard.
log_dir = '../logs/fit/' + datetime.datetime.now().strftime('fractalnet-%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# ### The EarlyStopping callback

# This callback stops training when the metrics (e.g. validation loss) are not improving,

early_stop_callback = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=0.01,
    patience=10,
    restore_best_weights=True
)

# ### The ModelCheckpoint callback

# This callback saves the model with the best metrics during training.

# +
checkpoint_path = 'checkpoints/fractalnet.ckpt'

# Save weights only: the model is later restored with
# `model.load_weights('./checkpoints/fractalnet.ckpt')`, which expects a
# weights checkpoint. The original `save_weights_only=False` wrote a full
# SavedModel, which is inconsistent with that restore path (and full saving
# of subclassed models to a `.ckpt` path is fragile).
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=True,
    save_freq='epoch',
    mode='auto'
)
# -

# ## Training the model

model = FractalNeuralNetwork(
    class_number=CLASS_NUMBER,
    verbose=True,
    ensure_input_shape=(None, 224, 224, 3)
)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(
    training_set,
    validation_data=validation_set,
    epochs=200,
    callbacks=[
        tensorboard_callback,
        checkpoint_callback,
        early_stop_callback
    ]
)

# # Model validation

# ## Loading the model

# We load the model with the best metrics (e.g. validation loss) from the checkpoint.
model = FractalNeuralNetwork( class_number=CLASS_NUMBER, verbose=True, ensure_input_shape=(None, 224, 224, 3) ) model.load_weights('./checkpoints/fractalnet.ckpt') # ## Loading the test data testing_set = generator.flow_from_directory( '/small-data-test', target_size=(224, 224), batch_size=32, class_mode='categorical' ) # ## Making diagnoses true_labels = np.concatenate([testing_set[i][1] for i in range(len(testing_set))], axis=0) predicted_labels = model.predict(testing_set) # ## Plot the ROC Curve # + fpr = dict() tpr = dict() auc_metric = dict() diagnosis_index_dict = {v: k for k, v in testing_set.class_indices.items()} for i in range(CLASS_NUMBER): diagnosis = diagnosis_index_dict[i] fpr[diagnosis], tpr[diagnosis], _ = roc_curve(true_labels[:, i], predicted_labels[:, i]) auc_metric[diagnosis] = auc(fpr[diagnosis], tpr[diagnosis]) # + for diagnosis in testing_set.class_indices: plt.plot(fpr[diagnosis], tpr[diagnosis], label=diagnosis) plt.plot([0, 1], [0, 1], 'k--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") plt.show() # - # ## Show AUC auc_metric
fractalnet/notebook-ENG-new-architecture.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HMM(隐马尔可夫模型) # ## 隐马尔可夫模型的基本概念 # ### 背景知识 # **动态模型(hmm, Karman Filter, Particle Filter)**包括时间序列和混合模型(系统状态(system state)和观测变量). # # 如果系统状态为离散,则模型为HMM;若为线性连续,则模型为Karman Filter;若为非线性连续,则为Particle Filter. # ### 隐马尔可夫模型的定义 # **状态序列(state sequence)**:隐藏的马尔可夫链随机生成的状态的序列。 # # **观测序列(observation sequence)**:每个状态生成一个观测,由此产生的观测的随机序列。 # # 设Q为所有可能的状态集合,V是所有可能的观测集合。 # $$ # Q=\{ q_1,q_2,\cdots,q_N\}, \quad V=\{ v_1,v_2,\cdots,v_N\} # $$ # # **状态转移矩阵**:$A=[a_{ij}]_{N\times N}$ # # 其中, # $$ # a_{ij} = P(i_{t+1}=q_j|i_t = q_i)\quad i=1,2,\cdots,N;j=1,2,\cdots,N # $$ # 即时刻t处于状态$q_i$的条件下在时刻t+1转移到状态$q_j$的概率。 # # **观测概率矩阵(发射矩阵)**:$B=[b_j(k)]_{N\times N}$ # # 其中, # $$ # b_j(k) = P(o_t=v_k|i_t=q_j)\quad j=1,2,\cdots,N;k=1,2,\cdots,N # $$ # 即刻t处于状态$q_j$的条件下生成观测$v_k$的概率。 # # **初始概率向量**:$\pi=(\pi_i)$ # # 其中, # $$ # \pi_i = P(i_1=q_i)\quad i=1,2,\cdots,N # $$ # 即时刻t=1时处于状态$q_i$的概率。 # # 综上,**隐马尔可夫模型**$\lambda$可以表达为 # $$ # \lambda = (\pi, A, B) # $$ # **其次马尔科夫假设(无后效性)**假设隐藏的马尔科夫链在任意时刻链t的状态只依赖其前一时刻的状态,与其他时刻的状态及观测无关,也与时刻t无关。 # $$ # P(i_t|i_{t-1}, o_{t-1},i_{t-2},o_{t-2}, \cdots,i_1, o_1)=P(i_t|i_{t-1}) # $$ # **独立观测假设**假设任意时刻的观测只依赖于该时刻的隐马尔科夫链的状态,与其他状态和观测无关。 # $$ # P(o_t|i_{T}, o_{T},i_{T-1},o_{T-1}, \cdots,i_1, o_1)=P(o_t|i_t) # $$ # ### 观测序列的生成过程 # 输入:隐马尔可夫模型$\lambda =(\pi,A,B)$,观测序列长度T; # # 输出:观测序列$o={o_1,o_2,\cdots,o_T}$ # # (1)按照初始状态$\pi$产生状态$i_1$ # # (2)令t=1 # # (3)按照状态$i_t$的观测概率分布$b_{i_t}(k)$生成$o_t$ # # (4)按照状态$i_t$的状态转移概率分布$a_{i_t,i_{t+1}}$生成$i_{t+1}$ # # (5)令t=t+1,若t<T则转(3),否则终止。 # # ### HMM的三个问题 # (1)**概率计算问题**。给定模型$\lambda=(\pi,A,B)$和观测序列$O=(o_1,o_2,\cdots,o_T)$,计算模型$\lambda$在观测序列O出现的概率$P(O|\lambda)$. 
# # (2)**学习问题**。已知观测序列$O=(o_1,o_2,\cdots,o_T)$,估计模型$\lambda=(\pi,A,B)$参数是的在该模型下观测序列概率$P(O|\lambda)$最大,即用极大似然估计的方法估计参数。 # # (3)**预测问题**(解码问题)。已知模型$\lambda=(\pi,A,B)$和观测序列$O=(o_1,o_2,\cdots,o_T)$,求对给定观测序列条件概率P(I|O)最大的状态序列$I=(i_1,i_2,\cdots,i_T)$.即给定观测序列,求最有可能的对应的状态序列。 # ## 概率计算算法 # ### 直接计算法 # $P(O|\lambda) = \sum_IP(O,I|\lambda)P(I|\lambda)\\ # =\sum_{i_1,i_2,\cdots,i_T}\pi_{i_1}b_{i_1}a_{i_1,i_2}\pi_{i_1}b_{i_1}(o_1)a_{i_1,i_2}b_{i_2}(o_2)\cdots a_{i_{T-1},i_T}b_{i_T}(o_T)\\ # =\sum_{i_1}\sum_{i_2}\cdots\sum_{i_T}\pi(a_{i1})\prod_{t=2}a_{i_{t-1}}a_{i_t}\prod_{t=1}b_{i_t}(o_t)$ # # 算法复杂度:$O(TN^T)$ # # ### 前向算法 # **前向概率**给定HMM$\lambda$,定义到时刻t部分观测序列为$o_1,o_2,\cdots,o_t$且状态为$q_i$的概率为前向概率。 # $$ # \alpha_t(i)=P(o_1,o_2,\cdots,o_t,i_t=q_i|\lambda) # $$ # 由此可得 # $$ # \alpha_{t+1}(j)=P(o_1,o_2,\cdots,o_{t+1},i_{t+1}=q_j|\lambda)\\ # =\sum_{i=1}^NP(o_1,o_2,\cdots,o_{t+1},i_{t+1}=q_j, i_t=q_i|\lambda)\\ # =\sum_{i=1}^NP(o_{t+1}|o_1,o_2,\cdots,o_t,i_{t+1}=q_j, i_t=q_i,\lambda)*P(o_1,o_2,\cdots,o_t,i_{t+1}=q_j, i_t=q_i|\lambda)\\ # =\sum_{i=1}^NP(o_{t+1}|i_{t+1}=q_j, \lambda)*P(o_1,o_2,\cdots,o_t,i_{t+1}=q_j, i_t=q_i|\lambda)\\ # =\sum_{i=1}^NP(o_{t+1}|i_{t+1}=q_j, \lambda)*P(i_{t+1}=q_j|o_1,o_2,\cdots,o_t,i_t=q_i,\lambda)*P(o_1,o_2,\cdots,o_t,i_t=q_i|\lambda)\\ # =\sum_{i=1}^NP(o_{t+1}|i_{t+1}=q_j, \lambda)*P(i_{t+1}=q_j|i_t=q_i,\lambda)*P(o_1,o_2,\cdots,o_t,i_t=q_i|\lambda)\\ # =\sum_{i=1}^N\alpha_ta_{ij}b_j(o_{t+1}) # $$ # **算法**: # # 输入:HMM$\lambda$,观测序列O。 # # 输出:观测序列概率$P(O|\lambda)$。 # # (1)初值$\alpha_1(i)=\pi_ib_i(o_1)$ # # (2)递推, 对t=1,2,...,N-1, # $$\alpha_{t+1}(j)=\sum_{i=1}^N\alpha_ta_{ij}b_j(o_{t+1})$$ # # (3)终止,$P(O|\lambda)=\sum_{i=1}^N\alpha_T(i)$ # # ### 后向算法 # 记$\beta_t(i)=P(o_{t+1,\cdots,o_T|i_t=q_i,\lambda})$ # # $$ # P(O|\lambda)=P(o_1,o_2,\cdots,o_T|\lambda)\\ # =\sum{i=1}^NP(o_1,o_2,\cdots,o_T,i_1=q_i)\\ # =\sum{i=1}^NP(o_1,o_2,\cdots,o_T|i_1=q_i)P(i_1=q_i)\\ # \sum{i=1}^NP(o_1|i_1=q_i)P(i_1=q_i)\\ # =\sum_{i=1}^N\pi_ib_i(o_1)\beta_1(i) # $$ # 
由递推得(由于HMM给定,推导时进行忽略) # $$ # \beta_t(i)=P(o_{t+1},\cdots,o_T|i_t=q_i)\\ # =\sum_{i=1}^NP(o_{t+1},\cdots,o_T,i_{t+1}=q_j|i_t=q_i)\\ # =\sum_{i=1}^NP(o_{t+1},\cdots,o_T|i_t=q_i,i_{t+1}=q_j)P(i_{t+1}=q_j|i_t=q_i)\\ # =\sum_{i=1}^NP(o_{t+1},\cdots,o_T|i_{t+1}=q_j)a_{ij}\\ # =\sum_{i=1}^NP(o_{t+1}|o_{t+2},\cdots,o_T,i_{t+1}=q_j)P(o_{t+2},\cdots,o_T|i_{t+1}=q_j)a_{ij}\\ # =\sum_{j=1}^Na_{ij}b_j(o_{t+1})\beta_{t+1}(j) # $$ # # **算法** # # 输入:HMM$\lambda$,观测序列O。 # # 输出:观测序列概率$P(O|\lambda)$。 # # (1) # $$ # \beta_T(i)=1\quad i=1,2,\cdots,N # $$ # # (2)对$t=T-1,T-2,\cdots,1$ # $$ # \beta_t(i)=\sum_{j=1}^Na_{ij}b_j(o_{t+1})\beta_{(t+1)}(j) # $$ # # (3) # $$ # P(O|\lambda)=\sum_{i=1}^N\pi_ib_i(o_1)\beta_1(i) # $$ # # ## 学习算法 # ### Baum-Welch算法(EM算法) # 由EM算法得 # $$ # \lambda^{(t+1)}=arg\max_{\lambda}\sum_IlogP(O,I|\lambda)P(I|O,\hat{\lambda}) # $$ # **1.确定完全数据的对数似然函数** # 给定训练数据包含S个长度为T的观测序列$\{O_1,O_2,\cdots,O_S\}$,训练目标为HMM$\lambda=(\pi,A,B)$ # # 所有观测数据写为$O=\{o_1,o_2,\cdots,o_T\}$,所有隐数据写为$I=\{i_1,i_2,\cdots,i_T\}$.完全数据$(O,I)=(o-1,\cdots,o_T,i_1,\cdots,i_T)$,完全数据的对数似然函数为$log(O,I|\lambda)$ # # **2.EM算法E步,求Q函数$Q(\lambda,\hat{\lambda})$** # $$ # Q(\lambda,\hat{\lambda})=\sum_IlogP(O,I|\lambda)P(I|O,\hat{\lambda})\\ # =\sum_Ilog\pi_{i_1}P(O,I|\hat{\lambda})+\sum_I\left(\sum_{(t=1)}^{T-1}loga_{i,i+1}\right)P(O,I|\hat{\lambda})+\sum_I\left(\sum_{(t=1)}^{T}logb_{i_t}(o_t)\right)P(O,I|\hat{\lambda}) # $$ # # **EM算法M步,极大化Q函数求模型参数** # 要极大化的三项分别单独出现在三项中,因此只需要对各项分别极大化 # **(1)** # Q函数第一项可以改写为 # $$ # \sum_Ilog\pi_{i_1}P(O,I|\hat{\lambda})=\sum_{i=1}^Nlog\pi_iP(O,i_1=i|\hat{\lambda}) # $$ # 注意到$\sum_{i=1}^N\pi_i=1$,利用拉格朗日乘子法,得到拉格朗日函数 # $$ # \sum_{i=1}^Nlog\pi_iP(O,i_1=i|\hat{\lambda})+\gamma\left(\sum_{i=1}^N\pi_i-1\right) # $$ # 对$\pi_i$求偏导并令结果为0得 # $$ # P(O,i_1=i|\hat{\lambda})+\gamma\pi_i=0 # $$ # 对i求和得到 # $$ # \gamma=-P(O|\hat{\lambda}) # $$ # 即 # $$ # \pi_i=\frac{P(O,i_1=i|\hat{\lambda})}{P(O|\hat{\lambda})} # $$ # # **(2)** # Q函数第二项可以改写为 # $$ # 
\sum_I\left(\sum_{(t=1)}^{T-1}loga_{i,i+1}\right)P(O,I|\hat{\lambda})=\sum_{i=1}^N\sum_{j=1}^N\sum_{t=1}^{T-1}loga_{ij}P(O,i_t=i,i_{t+1}=j|\hat{\lambda}) # $$ # 注意到约束条件$\sum_{j=1}^Na_{ij}=1$,同(1)得 # $$ # a_{ij}=\frac{\sum_{t=1}^{T-1}P(O,i_t=i,i_{t+1}=j|\hat{\lambda})}{\sum_{t=1}^{T-1}P(O,i_t=i|\hat{\lambda}} # $$ # # **(3)** # Q函数第三项可改写为 # $$ # \sum_I\left(\sum_{t=1}^{T}logb_{i_t}(o_t)\right)P(O,I|\hat{\lambda})=\ # \sum_{j=1}^N\sum_{t=1}^{T}logb_{j}(o_t)P(O,i_t=j|\hat{\lambda}) # $$ # 由约束$\sum_{k=1}^Mb_j(k)=1$得 # $$ # b_j(k)=\frac{\sum_{t=1}^TP(O,i_t=j|\hat{\lambda})I(o_t=v_k)}{\sum_{t=1}^TP(O,i_t=j|\hat{\lambda})} # $$ # # 综上,若将$\gamma_t(i):=\frac{P(O,i_t=i|\hat{\lambda})}{P(O|\hat{\lambda})},\quad \zeta_t(i,j):=\ # P(i_t=i,i_{t+1}=j|O,\hat{\lambda})$,则上述公式可以表示为 # $$ # a_{ij}=\frac{\sum_{t=1}^{T-1}\zeta_t(i,j)}{\sum_{t=1}^{T-1}\gamma_t(i)}\\ # b_j(k)=\frac{\sum_{t=1,o_t=v_k}^{T}\gamma_t(j)}{\sum_{t=1}^{T-1}\gamma_t(j)}\\ # \pi_i=\gamma_1(i) # $$ # # **算法** # 输入:观测数据$O=(o_1,o_2,\cdots,o_T)$; # 输出:HMM # (1)初始化 # # 对n=0,选取$a_{ij}^{(0)},b_j(k)^{(0)},\pi_i^{(0)}$得到模型$\lambda^{(0)}=(\pi^{(0)},A^{(0)},B^{(0)}$ # # (2)递推,对$n=1,2,\cdots,$ # $$ # a_{ij}^{(n+1)}=\frac{\sum_{t=1}^{T-1}\zeta_t(i,j)}{\sum_{t=1}^{T-1}\gamma_t(i)}\\ # b_j(k)^{(n+1)}=\frac{\sum_{t=1,o_t=v_k}^{T}\gamma_t(j)}{\sum_{t=1}^{T-1}\gamma_t(j)}\\ # \pi_i^{(n+1)}=\gamma_1(i) # $$ # # (3)终止,得到HMM # # # # # # # ## 预测算法 # ### 近似算法 # 给定HMM$\lambda$和观测序列O,在t时刻处于状态$q_i$的概率$\gamma_t(i)$为 # $$ # \gamma_t(i)=\frac{\alpha_t(i)\beta_t(i)}{P(O|\lambda)}=\frac{\alpha_t(i)\beta_t(i)}{\sum_{j=1}^N\alpha_t(j)\beta_t(j)} # $$ # 在每个时刻t最可能的状态$i^*_t$是 # $$ # i^*_t=arg\max_{i\le i\le N}[\gamma_t^i],t=1,2,\cdot,T # $$ # 从而得到序列$I*=(i^*_1,i^*_2,\cdots,i^*_T)$ # ### 维特比算法 # 输入:模型$\lambda$和观测$O=(o_1,o_2,\cdots,o_T)$; # # 输出:最优路径$I*=(i^*_1,i^*_2,\cdots,i^*_T)$ # # (1)初始化 # $$ # \delta_1(i)=\pi_ib_i(o_1),i=1,2,\cdots,N\\ # \Phi_1(i)=0,i=1,2,\cdots,N # $$ # # (2)递推,对$t=2,3,\cdots,T$ # $$ # \delta_t(i)=\max_{1\le j\le 
N}[\delta_{t-1}(j)a_{ji}]b_i(o_t),i=1,2,\cdots,N\\ # \Phi_t(i)=arg\max_{1\le j\le N}[\delta_{t-1}(j)a_{ji}],i=1,2,\cdots,N # $$ # # (3)终止 # $$ # P^*=\max_{1\le j\le N}\delta_T(i)\\ # i_T^*=arg\max_{1\le j\le N}[\delta_T(i)] # $$
HMM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Simple interactive ordering flow: show the menu, take the customer's
# choices, print the bill, collect contact details and build one CSV-style
# sales record line.
print('Hello! Welcome to Dominos!')
print('What pizza do you want?')

# Menu keyed by pizza number; each entry maps a size key to its price.
pizza = {1 : {'name' : 'ExtraVegenza', 'price_lr' : 550,'price_md' : 250, 'price_rg' : 99},
         2 : {'name' : 'Cloud 9'     , 'price_lr' : 650,'price_md' : 350, 'price_rg' : 149},
         3 : {'name' : 'Peppy Paneer', 'price_lr' : 750,'price_md' : 450, 'price_rg' : 199},
         4 : {'name' : '5 Pepper'    , 'price_lr' : 850,'price_md' : 550, 'price_rg' : 249},
         5 : {'name' : 'Farmhouse'   , 'price_lr' : 950,'price_md' : 650, 'price_rg' : 299}}

# Maps the user's size choice (1/2/3) onto the price key used in the menu.
size = {1 : 'price_lr', 2 : 'price_md', 3 : 'price_rg'}

print('\n')
print(pizza)
print('\n')

ui_pz = int(input("Enter your choice of pizza : "))
ui_sz = int(input("What should be the size : "))
ui_qn = int(input("How many pizza do you want : "))

print("----------------------")
print("Name : ", pizza[ui_pz]['name'])
print("Price : ", pizza[ui_pz][size[ui_sz]])
print("Quantity : ", ui_qn)
print("----------------------")
print("Billing Amount : ", ui_qn * pizza[ui_pz][size[ui_sz]])
print("----------------------")

ui_name = input("Enter your name : ")
ui_mail = input("Enter your mail_id : ")
ui_Phno = int(input("Enter your Phone no : "))
del_addr = input("Enter full address: ")

# One comma-separated line: name, unit price, quantity, total.
record = pizza[ui_pz]['name'] +","+ str(pizza[ui_pz][size[ui_sz]]) +","+str(ui_qn)+","+str(ui_qn * pizza[ui_pz][size[ui_sz]])+"\n"

# +
# Append the sale instead of opening in 'w' mode: 'w' truncated Sales.txt on
# every run, so only the most recent sale ever survived. A context manager
# also guarantees the file is closed even if the write fails.
with open('Sales.txt', 'a') as fd:
    fd.write(record)
# -
Domino's Chatbot - Saving Sales/Domino's Chatbot - Saving Sales.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First day! # Congratulations! It's your first day as a data scientist in the company! Your first project is to build a model for predicting if a movie will get a positive or negative review. # You need to start exploring your dataset. In order to create a function that will scan each movie review, you want to know how many characters every string has and print the result out together with a statement that indicate what the number refers to. To test if your function works correctly, you are going to start by analyzing only one example. # # The text of one movie review has been already saved in the variable movie. You can use print(movie) to view the variable in the IPython Shell. movie = 'fox and kelley soon become bitter rivals because the new fox books store is opening up right across the block from the small business .' # Find characters in movie variable length_string = len(movie) # Convert to string to_string = str(length_string) # Predefined variable statement = "Number of characters in this review:" # Concatenate strings and print result print(statement + " " + to_string) # # Artificial reviews # While checking out the movie reviews in your dataset, you realize that some of them show an atypical pattern. Since you should only include true reviews in your analysis, you decide to extract the suspicious ones that follow this pattern. You want to see if it is possible to artificially create reviews by using the first and last part of one example review and changing a keyword in the middle. # # The text of two movie reviews has been already saved in the variables movie1 and movie2. You can use the print() function to view the variables in the IPython Shell. # # Remember: The 1st character of a string has index 0. 
movie1 = 'the most significant tension of _election_ is the potential relationship between a teacher and his student .'

# Select the first 32 characters of movie1
first_part = movie1[:32]

# Select from 43rd character to the end of movie1
last_part = movie1[42:]

movie2 = 'the most significant tension of _rushmore_ is the potential relationship between a teacher and his student .'

# Select from 33rd to the 42nd character of movie2
middle_part = movie2[32:42]

# Print concatenation and movie2 variable
# Bug fix: the original printed first_part+last_part+last_part, which both
# duplicates the tail and never uses middle_part. The point of the exercise
# is that splicing movie2's keyword into movie1's frame reproduces movie2
# exactly, so the two printed lines should be identical.
print(first_part+middle_part+last_part)
print(movie2)

# # Palindromes
# Next, you are committed to find any peculiarity in the words included in the movie review dataset. A palindrome is a sequence of characters which can be read the same backward as forward, for example: Madam or No lemon, no melon. You realize that there are some funny movie names that can have this characteristic. You want to make a list of all movie titles that are funny palindromes but you will start by analyzing one example.
#
# In python, you can also specify steps by using a third index. If you don't specify the first or second index and the third one is negative, it will return the characters jumping and backwards.
#
# The text of a movie review for one example has been already saved in the variable movie. You can use print(movie) to view the variable in the IPython Shell.

# +
# Get the word
movie_title = movie[11:30]

# Obtain the palindrome (slice with step -1 reverses the string)
palindrome = movie_title[::-1]

# Print the word if it's a palindrome
if movie_title == palindrome:
    print(movie_title)
# -

# # Normalizing reviews
# It's time to extract some important words present in your movie review dataset. First, you need to normalize them and then, count their frequency. Part of the normalization implies converting all the words to lowercase, removing special characters and extracting the root of a word so you count the variants as one.
# # So imagine you have the following reviews: The movie surprises me very much and Marvel movies always surprise their audience. If you count the word frequency, you will count surprises one time and surprise one time. However, the verb surprise appears in both and its frequency should be two. # # The text of a movie review for only one example has been already saved in the variable movie. You can use print(movie) to view the variable in the IPython Shell. movie = '$I supposed that coming from MTV Films I should expect no less$' # Convert to lowercase and print the result movie_lower = movie.lower() print(movie_lower) # Remove specified character and print the result movie_no_sign = movie_lower.strip("$") print(movie_no_sign) # Split the string into substrings and print the result movie_split = movie_no_sign.split() print(movie_split) # Select root word and print the result word_root = movie_split[1][:-1] print(word_root) # # Time to join! # While normalizing your text, you noticed that one review had a particular structure. This review ends with the HTML tag <\i> and it has a lot of commas in different places of the sentence. You decide to remove the tag from the end and use the strategy of splitting the string and joining it back again without the commas. # # The text of a movie review has been already saved in the variable movie. You can use print(movie) to view the variable in the IPython Shell. movie = 'the film,however,is all good<\\i>' # Remove tags happening at the end and print results movie_tag = movie.strip("<\i>") print(movie_tag) # Split the string using commas and print results movie_no_comma = movie_tag.split(",") print(movie_no_comma) # Join back together and print results movie_join = ' '.join(movie_no_comma) print(movie_join) # # Split lines or split the line? # You are about to leave work when a colleague asks you to use your string manipulation skills to help him. 
You need to read strings from a file in a way that if the file contains strings on different lines, they are stored as separate elements. He also wants you to break the strings into pieces if you see that they contain commas. # # The text of the file has been already saved in the variable file. You can use print(file) to view the variable in the IPython Shell. # + file = "mtv films election, a high school comedy, is a current example from there, director <NAME> wastes no time, taking us into the water on a midnight swim" # Split string at line boundaries file_split = file.split('\n') # Print file_split print(file_split) # Complete for-loop to split by commas for substring in file_split: substring_split = substring.split(',') print(substring_split) # - # # Finding a substring # It's a new day at work and you need to continue cleaning your dataset for the movie prediction project. While exploring the dataset, you notice a strange pattern: there are some repeated, consecutive words occurring between the character at position 37 and the character at position 41. You decide to write a function to find out which movie reviews show this peculiarity, remembering that the ending position you specify is not inclusive. If you detect the word, you also want to change the string by replacing it with only one instance of the word. # # Complete the if-else statement following the instructions. # # The text of three movie reviews has been already saved in the variable movies. You can use print(movies) to view the variable in the IPython Shell. 
# + # import pandas as pd import pandas as pd # import numpy as np import numpy as np data = { 200: "it's clear that he's passionate about his beliefs , and that he's not just a punk looking for an excuse to beat people up .", 201: "I believe you I always said that the actor actor actor is amazing in every movie he has played", 202: "it's astonishing how frightening the actor actor norton looks with a shaved head and a swastika on his chest." } movies = pd.Series(data) for movie in movies: # Find if actor occurrs between 37 and 41 if movie.find('actor', 37, 42) == -1: print("Word not found") # Count occurrences and replace two by one elif movie.count('actor') == 2: print(movie.replace('actor actor', 'actor')) else: # Replace three occurrences by one print(movie.replace('actor actor actor', 'actor')) # - # # Where's the word? # Before finishing cleaning your dataset, you want to check if a specific word occurs in the reviews. You noticed earlier a specific pattern in the strings. Now, you want to create a function to check if a word is present between characters with index 12, and 50, remembering that ending position is exclusive, and print out the lowest index where this word occurs. There are two methods to handle this situation. You want to see which one works best. # # The text of two movie reviews has been already saved in the variable movies. You can use print(movies) to view the variable in the IPython Shell. 
# + # import pandas as pd import pandas as pd # import numpy as np import numpy as np data = { 137: "heck , jackie doesn't even have enough money for a haircut , looks like , much less a personal hairstylist .", 138: "in condor , chan plays the same character he's always played , himself , a mixture of bruce lee and tim allen , a master of both kung-fu and slapstick-fu .", } movies = pd.Series(data) for movie in movies: # Find the first occurrence of word print(movie.find('money', 12, 51)) # - for movie in movies: try: # Find the first occurrence of word print(movie.index('money', 12, 51)) except ValueError: print("substring not found") # # Replacing negations # In order to keep working with your prediction project, your next task is to figure out how to handle negations that occur in your dataset. Some algorithms for prediction do not work well with negations, so a good way to handle this is to remove either not or n't, and to replace the next word by its antonym. # # Let's imagine that you have the string: The movie isn't good. You will need to remove n't and replace good for bad. This way, your string ends up being The movie is bad. You notice that in the first column of the dataset, you have a string that uses the word isn't followed by important. # # The text of this column has been already saved in the variable movies so you start working with it. You can use print(movies) to view it in the IPython Shell. # + movies = "the rest of the story isn't important because all it does is serve as a mere backdrop for the two stars to share the screen ." # Replace negations movies_no_negation = movies.replace("isn't", "is") # Replace important movies_antonym = movies_no_negation.replace("important", "insignificant") # Print out print(movies_antonym) # -
regular-expressions-in-python/1. Basic Concepts of String Manipulation/notebook_section_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dropout and maxout # In this lab we will explore the methods of [dropout](https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf), a regularisation method which stochastically drops out activations from the model during training, and [maxout](http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf), another non-linear transformation that can be used in multiple layer models. This is based on material covered in the [fifth lecture slides](http://www.inf.ed.ac.uk/teaching/courses/mlp/2016/mlp05-hid.pdf). # ## Exercise 1: Implementing a dropout layer # # During training the forward propagation through a dropout layer produces outputs where a subset of the input dimensions are set to zero ('dropped out'). The dimensions to be dropped out are randomly sampled for each new batch, with each dimension having a probability $p$ of being included and the inclusion (or not) of each dimension independent of all the others. If the inputs to a dropout layer are $D$ dimensional vectors then we can represent the dropout operation by an elementwise multiplication by a $D$ dimensional *binary mask* vector $\boldsymbol{m} = \left[m_1 ~ m_2 ~\dots~ m_D\right]^{\rm T}$ where $m_d \sim \text{Bernoulli}(p) ~~\forall d \in \lbrace 1 \dots D\rbrace$. # # As a first step implement a `random_binary_mask` function in the cell below to generate a binary mask array of a specified shape, where each value in the outputted array is either a one with probablity `prob_1` or zero with probability `1 - prob_1` and all values are sampled independently. def random_binary_mask(prob_1, shape, rng): """Generates a random binary mask array of a given shape. 
Each value in the outputted array should be an indepedently sampled binary value i.e in {0, 1} with the probability of each value being 1 being equal to `prob_1`. Args: prob_1: Scalar value in [0, 1] specifying probability each entry in output array is equal to one. shape: Shape of returned mask array. rng (RandomState): Seeded random number generator object. Returns: Random binary mask array of specified shape. """ return rng.uniform(size=shape) < prob_1 # Test your `random_binary_mask` function using the cell below (if your implementation is incorrect you will get an `AssertionError` - look at what the assert statement is checking for a clue as to what is wrong). import numpy as np test_shapes = [(1, 1000), (10, 10, 10)] test_probs = [0.1, 0.5, 0.7] for i in range(10): for shape in test_shapes: for prob in test_probs: output = random_binary_mask(prob, shape, np.random) # Check generating correct shape output assert output.shape == shape # Check all outputs are binary values assert np.all((output == 1.) 
| (output == 0.)) # Check proportion equal to one plausible # This will be noisy so there is a chance this will error # even for a correct implementation assert np.abs(output.mean() - prob) < 0.1 # Given a randomly sampled binary mask $\boldsymbol{m}$, the outputs $\lbrace \boldsymbol{y}^{(b)} \rbrace_{b=1}^B$ of the stochastic forward propagation through a dropout layer given a batch of inputs $\lbrace \boldsymbol{x}^{(b)} \rbrace_{b=1}^B$ can be calculated by simply performing an elementwise multiplication of the inputs with the mask # # \begin{equation} # y^{(b)}_d = m_k x^{(b)}_d \qquad \forall d \in \lbrace 1 \dots D \rbrace # \end{equation} # # The corresponding partial derivatives required for implementing back-propagation through a dropout layer are # # \begin{equation} # \frac{\partial y^{(b)}_k}{\partial x^{(b)}_d} = # \begin{cases} # m_k & \quad k = d \\ # 0 & \quad k \neq d # \end{cases} # \qquad \forall k,\,d \in \lbrace 1 \dots D \rbrace # \end{equation} # # As discussed in the lecture slides, when using a model trained with dropout at test time dimensions are no longer stochastically dropped out and instead all activations are deterministically fed forward through the model. So that the expected (mean) outputs of each layer are the same at test and training we scale the forward propagated inputs during testing by $p$ the probability of each dimension being included in the output. 
If we denote the deterministically forward-propagated batch of outputs of a dropout layer at test time as $\lbrace \boldsymbol{z}^{(b)} \rbrace_{b=1}^B$ then we have # # \begin{equation} # z^{(b)}_d = # \mathbb{E}\left[ y^{(b)}_d \right] = # \sum_{m_d \in \lbrace 0,1 \rbrace} \left( \mathbb{P}\left[\mathrm{m}_d = m_d\right] m_d x^{(b)}_d \right) = # (p) (1) x^{(b)}_d + (1-p) (0) x^{(b)}_d = # p x^{(b)}_d \qquad \forall d \in \lbrace 1 \dots D \rbrace # \end{equation} # # To allow switching between this stochastic training time behaviour and deterministic test time behaviour, a new abstract `StochasticLayer` class has been defined in the `mlp.layers` module. This acts similarly to the layer objects we have already encountered other than adding an extra boolean argument `stochastic` to the `fprop` method interface. When `stochastic = True` (the default) a stochastic forward propagation should be caculated, for dropout this corresponding to $\boldsymbol{x}^{(b)} \to \boldsymbol{y}^{(b)}$ above. When `stochastic = False` a deterministic forward-propagation corresponding to the expected output of the stochastic forward-propagation should be calculated, for dropout this corresponding to $\boldsymbol{x}^{(b)} \to \boldsymbol{z}^{(b)}$ above. # # Using the skeleton `DropoutLayer` class definition below, implement the `fprop` and `bprop` methods. You may wish to store the binary mask used in the forward propagation as an attribute of the class for use in back-propagation - it is fine to assume that the `fprop` and `bprop` will always be called in sync. # + from mlp.layers import StochasticLayer class DropoutLayer(StochasticLayer): """Layer which stochastically drops input dimensions in its output.""" def __init__(self, rng=None, incl_prob=0.5, share_across_batch=True): """Construct a new dropout layer. Args: rng (RandomState): Seeded random number generator. incl_prob: Scalar value in (0, 1] specifying the probability of each input dimension being included in the output. 
share_across_batch: Whether to use same dropout mask across all inputs in a batch or use per input masks. """ super(DropoutLayer, self).__init__(rng) assert incl_prob > 0. and incl_prob <= 1. self.incl_prob = incl_prob self.share_across_batch = share_across_batch def fprop(self, inputs, stochastic=True): """Forward propagates activations through the layer transformation. Args: inputs: Array of layer inputs of shape (batch_size, input_dim). stochastic: Flag allowing different deterministic forward-propagation mode in addition to default stochastic forward-propagation e.g. for use at test time. If False a deterministic forward-propagation transformation corresponding to the expected output of the stochastic forward-propagation is applied. Returns: outputs: Array of layer outputs of shape (batch_size, output_dim). """ if stochastic: mask_shape = (1,) + inputs.shape[1:] if self.share_across_batch else inputs.shape self._mask = (rng.uniform(size=mask_shape) < self.incl_prob) return inputs * self._mask else: return inputs * self.incl_prob def bprop(self, inputs, outputs, grads_wrt_outputs): """Back propagates gradients through a layer. Given gradients with respect to the outputs of the layer calculates the gradients with respect to the layer inputs. This should correspond to default stochastic forward-propagation. Args: inputs: Array of layer inputs of shape (batch_size, input_dim). outputs: Array of layer outputs calculated in forward pass of shape (batch_size, output_dim). grads_wrt_outputs: Array of gradients with respect to the layer outputs of shape (batch_size, output_dim). Returns: Array of gradients with respect to the layer inputs of shape (batch_size, input_dim). 
""" return grads_wrt_outputs * self._mask def __repr__(self): return 'DropoutLayer(incl_prob={0:.1f})'.format(self.incl_prob) # - # Test your implementation by running the cell below (if your implementation is incorrect you will get an `AssertionError` - look at what the assert statement is checking for a clue as to what is wrong). seed = 31102016 rng = np.random.RandomState(seed) test_incl_probs = [0.1, 0.5, 0.7] input_shape = (5, 10) for incl_prob in test_incl_probs: layer = DropoutLayer(rng, incl_prob) inputs = rng.normal(size=input_shape) grads_wrt_outputs = rng.normal(size=input_shape) for t in range(100): outputs = layer.fprop(inputs, stochastic=True) # Check outputted array correct shape assert outputs.shape == inputs.shape # Check all outputs are either equal to inputs or zero assert np.all((outputs == inputs) | (outputs == 0)) grads_wrt_inputs = layer.bprop(inputs, outputs, grads_wrt_outputs) # Check back-propagated gradients only non-zero for non-zero outputs assert np.all((outputs != 0) == (grads_wrt_inputs != 0)) assert np.all(grads_wrt_outputs[outputs != 0] == grads_wrt_inputs[outputs != 0]) det_outputs = layer.fprop(inputs, stochastic=False) # Check deterministic fprop outputs are correct shape assert det_outputs.shape == inputs.shape # Check deterministic fprop outputs scaled correctly assert np.allclose(det_outputs, incl_prob * inputs) # ### Optional extension # # Above we assumed the same dropout mask was applied to each input in a batch, as specified in the lecture slides. In practice sometimes a different mask is sampled for each input. As an extension you could try implementing this per-input form of dropout either by defining a new layer or adding an extra argument to the constructor of the above layer which allows you to switch between the two forms. # ## Exercise 2: Training with dropout # Experiment with training models with dropout layers to classify MNIST digits. 
# Code has been provided below as a starting point for setting up the model objects though feel free to use any additional adaptive learning rules or learning rule schedulers you wrote during the coursework instead. You may also wish to change the model architecture to use a larger model with more parameters in which the regularisation provided by dropout is likely to have a more pronounced effect. You will probably also find that models with dropout generally need to be trained over more epochs than those without (can you suggest why this might be?).
#
# You should try training with a few different `incl_prob` settings for the dropout layers and try to establish how the values chosen affect the training performance. You may wish to experiment with using a different dropout probability at the input than for the intermediate layers (why?).
#
# You may wish to start reading through and implementing exercise 3 while waiting for training runs to complete.

# +
import numpy as np
import logging
from mlp.data_providers import MNISTDataProvider
from mlp.models import MultipleLayerModel
from mlp.layers import ReluLayer, AffineLayer
from mlp.errors import CrossEntropySoftmaxError
from mlp.initialisers import GlorotUniformInit, ConstantInit
from mlp.learning_rules import MomentumLearningRule
from mlp.optimisers import Optimiser
import matplotlib.pyplot as plt
# %matplotlib inline

# Fix the random number generator seed so runs are reproducible.
seed = 31102016
rng = np.random.RandomState(seed)

# Route training progress messages from the optimiser to stdout.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.handlers = [logging.StreamHandler()]

# Providers for the MNIST training and validation splits.
train_data = MNISTDataProvider('train', batch_size=50, rng=rng)
valid_data = MNISTDataProvider('valid', batch_size=50, rng=rng)

# +
# Probability that an input is *kept* (included) by each dropout layer.
incl_prob = 0.5

input_dim, output_dim, hidden_dim = 784, 10, 125

# Use Glorot initialisation scheme for weights and zero biases
weights_init = GlorotUniformInit(rng=rng, gain=2.**0.5)
biases_init = ConstantInit(0.)

# Three affine layers with rectified linear non-linearities and a dropout
# layer placed in front of every affine layer.
model = MultipleLayerModel([
    DropoutLayer(rng, incl_prob),
    AffineLayer(input_dim, hidden_dim, weights_init, biases_init),
    ReluLayer(),
    DropoutLayer(rng, incl_prob),
    AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init),
    ReluLayer(),
    DropoutLayer(rng, incl_prob),
    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)
])

# Multiclass classification therefore use cross-entropy + softmax error
error = CrossEntropySoftmaxError()

# Use a momentum learning rule - you could use an adaptive learning rule
# implemented for the coursework here instead
learning_rule = MomentumLearningRule(0.02, 0.9)

# Track classification accuracy alongside the error during training.
data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

optimiser = Optimiser(
    model, error, learning_rule, train_data, valid_data, data_monitors,
    notebook=True)

num_epochs = 100
stats_interval = 5

stats, keys, run_time = optimiser.train(
    num_epochs=num_epochs, stats_interval=stats_interval)

# Plot the change in the validation and training set error over training.
fig_1 = plt.figure(figsize=(8, 4))
ax_1 = fig_1.add_subplot(111)
for stat_key in ['error(train)', 'error(valid)']:
    ax_1.plot(np.arange(1, stats.shape[0]) * stats_interval,
              stats[1:, keys[stat_key]], label=stat_key)
ax_1.legend(loc=0)
ax_1.set_xlabel('Epoch number')

# Plot the change in the validation and training set accuracy over training.
# Plot accuracy curves on a second figure, mirroring the error plot above.
fig_2 = plt.figure(figsize=(8, 4))
ax_2 = fig_2.add_subplot(111)
for stat_key in ['acc(train)', 'acc(valid)']:
    ax_2.plot(np.arange(1, stats.shape[0]) * stats_interval,
              stats[1:, keys[stat_key]], label=stat_key)
ax_2.legend(loc=0)
ax_2.set_xlabel('Epoch number')
# -

# ## Exercise 3: Implementing maxout
#
# [Maxout](http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf) can be considered a generalisation of the rectified linear transformation implemented in the previous lab.
#
# For a rectified linear (`Relu`) layer the forward propagation corresponds to
#
# \begin{equation}
#   y^{(b)}_k =
#   \max\left\lbrace 0,\,x^{(b)}_k \right\rbrace
# \end{equation}
#
# i.e. each output corresponds to a pairwise maximum of a constant (0) and the input.
#
# Instead of taking the maximum of the input and a constant, we could instead consider taking the maximum over sets of inputs of a fixed size $s$.
#
# \begin{equation}
#   y^{(b)}_k =
#   \max\left\lbrace x^{(b)}_{(k-1)s + 1},\, x^{(b)}_{(k-1)s + 2},\, \dots ,\, x^{(b)}_{ks} \right\rbrace
# \end{equation}
#
# If these inputs $x^{(b)}_d$ are themselves the outputs of an affine layer, then this corresponds to taking the maximum of a series of affine functions of the previous layer outputs. Like a rectified linear layer this leads to piecewise linear input-output relationships (which have well-behaved gradients which do not suffer from the saturation problems of logistic sigmoid / hyperbolic tangent transformations) but unlike the rectified linear case we do not end up forcing a portion of the outputs to be zero.
#
# Experimentally this form of transformation has been found to give good performance, with the name *maxout* chosen because the *out*put is the *max*imum of a set of inputs.
# Maxout is also commonly used with dropout layers; note, however, that they are not directly related - maxout defines a deterministic non-linear transformation which can help improve the representational capacity and trainability of models; dropout defines a stochastic transformation which is mainly aimed at regularising a model to reduce overfitting.
#
# Using layers which take the maximum of fixed sized sets of inputs is also a common technique in models with convolutional layers which we will cover later in the course, with the layer in that setting commonly being termed a *max-pooling* layer (with there being a natural generalisation to other choices of reduction functions over pools such as the mean). We will adopt this terminology here for a layer implementing the transformation described above and we will be able to reuse our code implementing this maximum operation when experimenting with convolutional models.
#
# The partial derivatives of this max-pooling transformation are sparse (lots of values are zero), with only the partial derivative of the output of a pool with respect to the maximum input in the pool non-zero. This can be expressed as
#
# \begin{equation}
#   \frac{\partial y^{(b)}_k}{\partial x^{(b)}_d} =
#   \begin{cases}
#     1 & \quad (k-1)s + 1 \leq d \leq ks
#         \quad\textrm{and}\quad
#         x^{(b)}_d = \max\left\lbrace x^{(b)}_{(k-1)s + 1},\, x^{(b)}_{(k-1)s + 2},\, \dots ,\, x^{(b)}_{ks} \right\rbrace \\
#     0 & \quad \textrm{otherwise}
#   \end{cases}.
# \end{equation}
#
# Using these definitions implement the `fprop` and `bprop` methods of the skeleton `MaxPoolingLayer` class below.
#
# Some hints
#
# * One way of organising the inputs into non-overlapping pools is using the `numpy.reshape` function.
# * The `numpy.max` function has an `axis` argument which allows you to specify the axis (dimension) of the input array to take the maximum over.
# * It may help to construct a binary mask corresponding to the definitions of the partial derivatives above to allow you to implement the `bprop` method.
# * As with the `DropoutLayer` it is fine to temporarily store values calculated in the `fprop` method as attributes of the object (e.g. `self.val = val`) to use in the `bprop` method (although you don't necessarily need to do this).

# +
from mlp.layers import Layer


class MaxPoolingLayer(Layer):

    def __init__(self, pool_size=2):
        """Construct a new max-pooling layer.

        Args:
            pool_size: Positive integer specifying size of pools over which
               to take maximum value. The outputs of the layer feeding in
               to this layer must have a dimension which is a multiple of
               this pool size such that the outputs can be split in to
               pools with no dimensions left over.
        """
        self.pool_size = pool_size

    def fprop(self, inputs):
        """Forward propagates activations through the layer transformation.

        This corresponds to taking the maximum over non-overlapping pools of
        inputs of a fixed size `pool_size`.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).

        Returns:
            outputs: Array of layer outputs of shape (batch_size, output_dim).
        """
        assert inputs.shape[-1] % self.pool_size == 0, (
            'Last dimension of inputs must be multiple of pool size')
        # Regroup the trailing dimension into (num_pools, pool_size) so the
        # maximum can be taken over the final (pool) axis.
        grouped = inputs.reshape(
            inputs.shape[:-1] + (inputs.shape[-1] // self.pool_size, self.pool_size))
        maxima = grouped.max(-1)
        # Remember which entries attained their pool's maximum; bprop routes
        # gradients through exactly (and only) these positions.
        self._mask = grouped == maxima[..., None]
        return maxima

    def bprop(self, inputs, outputs, grads_wrt_outputs):
        """Back propagates gradients through a layer.

        Given gradients with respect to the outputs of the layer calculates
        the gradients with respect to the layer inputs.

        Args:
            inputs: Array of layer inputs of shape (batch_size, input_dim).
            outputs: Array of layer outputs calculated in forward pass of
                shape (batch_size, output_dim).
            grads_wrt_outputs: Array of gradients with respect to the layer
                outputs of shape (batch_size, output_dim).

        Returns:
            Array of gradients with respect to the layer inputs of shape
            (batch_size, input_dim).
        """
        # Broadcast each pool's gradient across its pool, zero out every
        # non-maximal entry via the mask, then flatten the pool axis away.
        spread = self._mask * grads_wrt_outputs[..., None]
        return spread.reshape(inputs.shape)

    def __repr__(self):
        return 'MaxPoolingLayer(pool_size={0})'.format(self.pool_size)
# -

# Test your implementation by running the cell below (if your implementation is incorrect you will get an `AssertionError` - look at what the assert statement is checking for a clue as to what is wrong).

test_inputs = np.array([[-3, -4, 5, 8], [0, -2, 3, -8], [1, 5, 3, 2]])
test_outputs_1 = np.array([[8], [3], [5]])
test_grads_wrt_outputs_1 = np.array([[10], [5], [-3]])
test_grads_wrt_inputs_1 = np.array([[0, 0, 0, 10], [0, 0, 5, 0], [0, -3, 0, 0]])
test_outputs_2 = np.array([[-3, 8], [0, 3], [5, 3]])
test_grads_wrt_outputs_2 = np.array([[3, -1], [2, 5], [5, 3]])
test_grads_wrt_inputs_2 = np.array([[3, 0, 0, -1], [2, 0, 5, 0], [0, 5, 3, 0]])
layer_1 = MaxPoolingLayer(4)
layer_2 = MaxPoolingLayer(2)
# Check fprop with pool_size = 4
assert np.allclose(layer_1.fprop(test_inputs), test_outputs_1)
# Check bprop with pool_size = 4
assert np.allclose(
    layer_1.bprop(test_inputs, test_outputs_1, test_grads_wrt_outputs_1),
    test_grads_wrt_inputs_1
)
# Check fprop with pool_size = 2
assert np.allclose(layer_2.fprop(test_inputs), test_outputs_2)
# Check bprop with pool_size = 2
assert np.allclose(
    layer_2.bprop(test_inputs, test_outputs_2, test_grads_wrt_outputs_2),
    test_grads_wrt_inputs_2
)

# ## Exercise 4: Training with maxout
#
# Use your `MaxPoolingLayer` implementation in a multiple layer models to experiment with how well maxout networks are able to classify MNIST digits. As with the dropout training exercise, code has been provided below as a starting point for setting up the model objects, but again feel free to substitute any components.
#
# If you have time you may wish to experiment with training a model using a combination of maxout and dropout or another regularisation method covered in the last lab notebook.
# +
import numpy as np
import logging
from mlp.data_providers import MNISTDataProvider
from mlp.models import MultipleLayerModel
from mlp.layers import AffineLayer
from mlp.errors import CrossEntropySoftmaxError
from mlp.initialisers import GlorotUniformInit, ConstantInit
from mlp.learning_rules import MomentumLearningRule
from mlp.optimisers import Optimiser
import matplotlib.pyplot as plt
# %matplotlib inline

# Fix the random number generator seed so runs are reproducible.
seed = 31102016
rng = np.random.RandomState(seed)

# Route training progress messages from the optimiser to stdout.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.handlers = [logging.StreamHandler()]

# Providers for the MNIST training and validation splits.
train_data = MNISTDataProvider('train', batch_size=50, rng=rng)
valid_data = MNISTDataProvider('valid', batch_size=50, rng=rng)

# +
# Size of pools to take maximum over
pool_size = 2

input_dim, output_dim, hidden_dim = 784, 10, 100

# Use Glorot initialisation scheme for weights and zero biases
weights_init = GlorotUniformInit(rng=rng)
biases_init = ConstantInit(0.)

# Three affine layers interleaved with max-pooling layers: each pooled
# affine layer emits `pool_size` candidates per hidden unit, so its output
# width is hidden_dim * pool_size and pooling brings it back to hidden_dim.
model = MultipleLayerModel([
    AffineLayer(input_dim, hidden_dim * pool_size, weights_init, biases_init),
    MaxPoolingLayer(pool_size),
    AffineLayer(hidden_dim, hidden_dim * pool_size, weights_init, biases_init),
    MaxPoolingLayer(pool_size),
    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)
])

# Multiclass classification therefore use cross-entropy + softmax error
error = CrossEntropySoftmaxError()

# Use a momentum learning rule - you could use an adaptive learning rule
# implemented for the coursework here instead
learning_rule = MomentumLearningRule(0.02, 0.9)

# Track classification accuracy alongside the error during training.
data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}

optimiser = Optimiser(
    model, error, learning_rule, train_data, valid_data, data_monitors,
    notebook=True)

num_epochs = 100
stats_interval = 5

stats, keys, run_time = optimiser.train(
    num_epochs=num_epochs, stats_interval=stats_interval)

# Plot the change in the validation and training set error over training.
fig_1 = plt.figure(figsize=(8, 4))
ax_1 = fig_1.add_subplot(111)
for stat_key in ['error(train)', 'error(valid)']:
    ax_1.plot(np.arange(1, stats.shape[0]) * stats_interval,
              stats[1:, keys[stat_key]], label=stat_key)
ax_1.legend(loc=0)
ax_1.set_xlabel('Epoch number')

# Plot the change in the validation and training set accuracy over training.
fig_2 = plt.figure(figsize=(8, 4))
ax_2 = fig_2.add_subplot(111)
for stat_key in ['acc(train)', 'acc(valid)']:
    ax_2.plot(np.arange(1, stats.shape[0]) * stats_interval,
              stats[1:, keys[stat_key]], label=stat_key)
ax_2.legend(loc=0)
ax_2.set_xlabel('Epoch number')
# -
notebooks/06_Dropout_and_maxout.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# ## Visualize Covid19 Data in Python

# ## data source
# The data is from [European Centre for Disease Prevention and Control](https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide)

# ![covid image](https://www.jmu.edu/_images/news/2020/Covid-19Dashboard-06.png)

# %matplotlib inline
import pandas

# ## a quick view of the data

# Load the ECDC worldwide case/death table straight from S3.
df = pandas.read_excel('s3://samia-ia241-spring2021/COVID-19-geographic-disbtribution-worldwide-2020-12-14.xls')

df[:10]

# ## trend of the number of cases

# Select the 'cases' column *before* summing: aggregating the whole frame
# would also try to add the non-numeric columns (country names, dates as
# strings), which pandas >= 2.0 rejects with a TypeError. Selecting first
# is also faster since only one column is aggregated.
sum_cases_per_day = df.groupby('dateRep')['cases'].sum()

sum_cases_per_day.plot()

# ## the top 10 countries with the highest deaths

# Same column-first pattern for total deaths per country.
sum_death_per_country = df.groupby('countriesAndTerritories')['deaths'].sum()

sum_death_per_country.nlargest(10).plot.bar()

# ## List of all countries

pandas.unique(df['countriesAndTerritories'])

# ## The USA data

# +
usa_data = df.loc[df['countriesAndTerritories'] == 'United_States_of_America']

usa_data[:10]
# -

# ## how the # death is related to the # case in the USA

# Scatter of daily deaths against daily cases, coloured by month.
usa_data.plot.scatter(x='cases', y='deaths', c='month')
lab13.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### pylegoclassifier workbook # ### magnus wood, december 2020, bsyse 530 semester project # The below code block will be used in the 'pylegoclassifer.py' module. It will be used in the matlab integration, where images obtained by Eric will use functions from this code to do lego color classification. # # This jupyter notebook exists solely for developing it. I should probably share it too. # # ### pylegoclassifier.py functionality # ### The code needs to do this: # # 1. Take an image file in and ensure it is in the right format. # 2. Perform background segmentation using ImageSegmentation. # 3. Data extraction: # a. # b. # 4. Pass the dataframe to the # + # #%%writefile pylegoclassifier.py # import the needed packages import numpy as np from matplotlib import pyplot as plt import cv2 as cv from scipy import ndimage from skimage import morphology from skimage import exposure import os from math import pi from math import isnan import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, precision_score, recall_score from skimage.filters import sobel # set random seed np.random.seed(26) # the NaiveBayes classifier I wrote for assignment 6 in BSYSE_530, modified a little for this purpose class NaiveBayes: # P(c|x) = P(x|c) * P(c) / P(x) # P(x|x) is the posterior probability # P(x|c) is the likelihood # P(c) is the class prior probability, or the prob of c occuring indpendently. 
# P(x) is the predictor prior probability, or the prob of x occuring independently def fit(self, features, target): # define class variables self.classes = np.unique(target) self.count = len(self.classes) self.feature_nums = features.shape[1] self.rows = features.shape[0] # calculate statistics for all those features self.calc_statistics(features, target) # prior is the random chance of drawing a particular class based on its proportion in the dataset self.prior = self.calc_prior(features, target) def get_predictions(self, input_vector): predictions = [] for i in range(len(input_vector)): result = self.calc_posterior((input_vector.iloc[i,:])) predictions.append(result) return predictions def predict(self, observation): #call the calc_posterior function on the observation pred_class = self.calc_posterior(observation) return pred_class def calc_statistics(self, features, target): # calculate mean, variance for each column and convert to numpy array self.mean = features.groupby(target).apply(np.mean).to_numpy() self.var = features.groupby(target).apply(np.var).to_numpy() return self.mean, self.var def calc_prior(self, features, target): # this is the probability of picking one of a class at random from the dataset self.prior = (features.groupby(target).apply(lambda x: len(x)/self.rows).to_numpy()) return self.prior def calc_posterior(self, x): # this is the probability, post evidence # x is a numpy array # x is feature vector for one observation # make a list that we will add each classes posterior prob to posteriors = [] # iterate through the classes for i in range(0, self.count): # for each class look at the prior probability for the class prior = self.prior[i] # calculate the conditional probability for the conditional = np.sum(self.gaussian_density(i, x)) posterior = prior + conditional # print(f"i = {i}, prior = {prior}, conditional = {conditional}, posterior = {posterior}") posteriors.append(posterior) return self.classes[np.argmax(posteriors)] def 
gaussian_density(self, class_idx, x): # calc probability from gaussian denssityy fucntion (normal dist) mean = self.mean[class_idx] var = self.var[class_idx] # this part sucked and I had a typo that cost me hours numerator = np.exp(-((x-mean)**2 / (2 * var))) denominator = np.sqrt(2 * np.pi * var) return numerator / denominator def pdf(self, x, mean, stdev): # calculate probability density function exponent = np.exp(-((x-mean)**2 / (2*stdev**2))) return exponent * (1/(np.sqrt(2*np.pi)*stdev)) def get_accuracy(self, test, predictions): correct = 0 for i in range(len(test)): if test.iloc[i] == predictions[i]: correct += 1 return (correct / float(len(test))) # TODO: read these and see how it works # https://www.mathworks.com/help/matlab/matlab_external/matlab-arrays-as-python-variables.html # https://www.mathworks.com/help/matlab/matlab_external/passing-data-to-python.html # this exists only for my testing purposes class MatlabSurrogate(): def __init__(self): self.state_of_mind = "Badass." def acquire_kinect_image(self, filename): # give this function a filename, and it will load that image with opencv # this will be a BGR format, because that is how opencv rolls kinect_image = cv.imread(filename) print(f"kinect has acquired the image with shape = {kinect_image.shape}") return kinect_image # function to display images resized, using opencv def imshow(self, image): w, h = int(image.shape[1]/4), int(image.shape[0]/4) cv.namedWindow("output", cv.WINDOW_NORMAL) cv.resizeWindow("output", (w, h)) cv.imshow("output", image) cv.waitKey(0) cv.destroyAllWindows() # I should probably have one image processing class that takes in a single image and then spits out a dataframe that could be used for prediction # replaces ImageSegmenter class ImageProcess(): def __init__(self): print("image processor activated! use 'process_image_to_df()' to get back a pandas df") def dummy_method(self, a): if type(a) is np.ndarray: result = "object is a numpy.ndarray, this is perfect. 
Is the image RGB order or BGR?" return result else: result = "object is a " + str(type(a)) + "and I'm gonna have a hard time with that" return result def bg_segmentation(self, image, mode="hsv"): if mode=="sobel": from skimage.filters import sobel gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY) # find the edges elev_map = sobel(gray_image) # threshold it foreground = np.zeros_like(image) foreground[gray_image < 30] = 1 foreground[gray_image > 150] = 2 #TODO add this else: # # gaussian blur # blur_image = ndimage.gaussian_filter(image, sigma=4) # create an hsv mask for red colors color_mask = cv.inRange(cv.cvtColor(image, cv.COLOR_BGR2HSV), (0, 0, 100), (180, 255, 255)).astype(np.uint8) black_mask = cv.inRange(cv.cvtColor(image, cv.COLOR_BGR2HSV), (0, 0, 0), (179, 255, 30)).astype(np.uint8) # hsv_mask = black_mask + color_mask hsv_mask = black_mask + color_mask hsv_mask = np.where(hsv_mask > 0, 1, 0).astype(np.uint8) # # erode the mask # hsv_mask = morphology.erosion(hsv_mask, morphology.disk(5)) # # gaussian blur hsv_mask = ndimage.gaussian_filter(hsv_mask, sigma=1) # erode the mask hsv_mask = morphology.erosion(hsv_mask, morphology.disk(5)) # median filter to despeckle hsv_mask = ndimage.median_filter(hsv_mask, size=(3, 3)).astype(np.uint8) # binary dilation hsv_mask = morphology.binary_dilation(hsv_mask, np.ones((20, 20))).astype(np.uint8) # fill the holes hsv_mask = ndimage.binary_fill_holes(hsv_mask).astype(np.uint8) # erode the mask hsv_mask = morphology.erosion(hsv_mask, morphology.disk(5)) # TODO: remove this it is for testing purposes to show the segmentation m = MatlabSurrogate() m.imshow(cv.bitwise_and(image, image, mask=hsv_mask).astype(np.uint8)) # apply the mask and return the result return hsv_mask def bg_segmentation_eucdist(self, img_cube, roi_origin=(50, 50)): def euc_dist(roi_channels, sample_channels): dist = [(roi_channels[i] - sample_channels[i])**2 for i in range(0, len(sample_channels))] euc_dist = np.sqrt(np.sum(dist)) return euc_dist # 
variables dist_th = 150 # define the roi using these values and use it to subset my_image and return the subset image roi = np.array(img_cube[roi_origin[0]:roi_origin[0]+20, roi_origin[1]:roi_origin[1]+20,:]) ################################################################ # calculate the mean intensity value for the roi at each channel and store in a vector roi_mean_vector = np.zeros(shape=(img_cube.shape[2], 1)) # iterate through all the channels for channel in range(0, img_cube.shape[2]): # channel of interest, reshaped to a vector coi = img_cube[:,:,channel] coi_vector = coi.reshape((img_cube.shape[0]* img_cube.shape[1]), 1) # mean intensity for the channel added to intensity vector roi_mean_vector[channel] = np.mean(coi_vector) ################################################################# # knn output_array = np.zeros(shape=(img_cube.shape[0], img_cube.shape[1])) # time this process import time start_time = time.time() for i in range(0, output_array.shape[0]): for j in range(0, output_array.shape[1]): # calculate the euc distance from the pixel[i,j] to roi_mean_vector distance = euc_dist(roi_mean_vector, img_cube[i, j]) if distance < dist_th: output_array[i, j] = 1 print(time.time() - start_time) # TODO: image enhancement on the output array to get rid of holes # label the objects labels, num_features = ndimage.measurements.label(output_array) # retain only the object 1, the apple mask = np.where(labels == 1, 1, 0).reshape(output_array.shape) # median filter to denoise mask = ndimage.median_filter(mask, size=(3, 3)).astype(np.int) return mask # this is the parent function of this class, it will call the other classes def process_image_to_df(self, image, area_th): # get a mask by background segmentation using hsv values mask = self.bg_segmentation(image) # output image with drawn on contours output_image = image.copy() # find the contours of the detected objects in the image contours, hier = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) # 
create the df that we'll return for this image df = pd.DataFrame(columns=['y']) # blank canvas cimg = np.zeros_like(image) # reset the object num object_num = 0 # draw all the contours on the image for cnt in contours: # blank canvas cimg_subset = np.zeros_like(image) # get the x, y, w, h of the bounding rect for the contour x, y, w, h = cv.boundingRect(cnt) # contour features area = cv.contourArea(cnt) rect_area = w * h fullosity = area / rect_area # get rid of tiny objects that are probably noise if area > area_th and fullosity > .5: aspect_ratio = float(w)/h extent = float(area/ rect_area) hull = cv.convexHull(cnt) hull_area = cv.contourArea(hull) solidity = float(area)/hull_area eq_diameter = np.sqrt(4*area/np.pi) M= cv.moments(cnt) cx= int(M['m10']/M['m00']) cy= int(M['m01']/M['m00']) # draw the contour on the blank image as a filled white object cv.drawContours(cimg, [cnt], 0, color=(255, 255, 255), thickness=-1) # draw the bounding box on the cimg and output img as a green boundary cv.rectangle(cimg, (x, y), (x+w, y+h), (0, 255,0), 2) cv.rectangle(output_image, (x, y), (x+w, y+h), (0, 255,0), 2) # take this rectangle as a subset of the image, and calculate things within it # define the object subset of the image and mask cimg_subset = cimg[y:y+h, x:x+w] img_subset = image[y:y+h, x:x+w, :] img_subset_hsv = cv.cvtColor(img_subset, cv.COLOR_BGR2HSV) # create an hsv mask to remove the black background again color_mask = cv.inRange(cv.cvtColor(img_subset, cv.COLOR_BGR2HSV), (0, 0, 100), (180, 255, 255)).astype(np.uint8) black_mask = cv.inRange(cv.cvtColor(img_subset, cv.COLOR_BGR2HSV), (0, 0, 0), (90, 100, 10)).astype(np.uint8) hsv_mask = black_mask + color_mask # apply the mask f = cv.bitwise_and(img_subset_hsv, img_subset_hsv, mask=hsv_mask).astype(np.uint8) # calculate where the object is pts = np.where(cimg_subset == 255) hue = img_subset_hsv[pts[0], pts[1], 0] sat = img_subset_hsv[pts[0], pts[1], 1] val = img_subset_hsv[pts[0], pts[1], 2] r = 
img_subset[pts[0], pts[1], 0] g = img_subset[pts[0], pts[1], 1] b = img_subset[pts[0], pts[1], 2] # add the object labels to the cimg for identification cv.putText(cimg, text= str(object_num), org=(cx - 5,cy - 5), fontFace= cv.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,0,255), thickness=5, lineType=cv.LINE_AA) # add the object labels to the cimg for identification cv.putText(output_image, text= str(object_num), org=(cx - 5,cy - 5), fontFace= cv.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,255,255), thickness=5, lineType=cv.LINE_AA) # print(r.mean(), g.mean(), b.mean(), gli.mean()) df = df.append({'color' : 0, 'x': x, 'y': y, 'object_num': object_num, 'r': r.mean(), 'g': g.mean(), 'b': b.mean(), 'hue': hue.mean(), 'sat': sat.mean(), 'val': val.mean() }, ignore_index=True) # last thing we do on this loop is increment the object_num object_num += 1 # end result should be a pandas dataframe and the contour image with numbers return df.sort_values(by='y', axis=0, ascending=True), output_image def hsv_slide_tool(self, image): def empty(a): pass h, w = int(image.shape[1]/4), int(image.shape[0]/4) cv.namedWindow('masked_image', cv.WINDOW_NORMAL) cv.resizeWindow('masked_image', 800, 600) cv.namedWindow("trackbars") cv.resizeWindow("trackbars", 800, 300) cv.createTrackbar("hue_min", "trackbars", 0, 179, empty) cv.createTrackbar('hue_max', 'trackbars', 179, 179, empty) cv.createTrackbar('sat_min', 'trackbars', 0, 255, empty) cv.createTrackbar('sat_max', 'trackbars', 255, 255, empty) cv.createTrackbar('val_min', 'trackbars', 0, 255, empty) cv.createTrackbar('val_max', 'trackbars', 255, 255, empty) while True: # get image img_hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV) # get trackbar positions h_min = cv.getTrackbarPos("hue_min", "trackbars") h_max = cv.getTrackbarPos('hue_max', 'trackbars') s_min = cv.getTrackbarPos('sat_min', 'trackbars') s_max = cv.getTrackbarPos('sat_max', 'trackbars') v_min = cv.getTrackbarPos('val_min', 'trackbars') v_max = cv.getTrackbarPos('val_max', 
'trackbars') # create mask lower_hsv = np.array([h_min, s_min, v_min]) higher_hsv = np.array([h_max, s_max, v_max]) mask = cv.inRange(img_hsv, lower_hsv, higher_hsv) masked_image = cv.bitwise_and(img_hsv, img_hsv, mask=mask) cv.imshow('masked_image', masked_image) k = cv.waitKey(1000) & 0xFF # large wait time if k == 113 or k == 27: break cv.destroyAllWindows() # + ################### testing this out like its matlab ################## imageproc = ImageProcess() # does the background segmentation and other image processing methods, also data extraction matlab = MatlabSurrogate() # does some image loading and display, pretending we're using some test_image = matlab.acquire_kinect_image("images/legos_0.png") # use the segmentation function to segment the image. # seg_image = imageproc.bg_segmentation(test_image) # matlab.imshow(seg_image) # # process the data fully and receive a df backfuschia image_df, cimg = imageproc.process_image_to_df(test_image, area_th = 1000) matlab.imshow(cimg) # + test_image = matlab.acquire_kinect_image("images/legos_0.png") # use the segmentation function to segment the image. seg_image = imageproc.bg_segmentation(test_image) matlab.imshow(seg_image) # - hsv_image = cv.imread("images/legos_0.png") imageproc = ImageProcess() imageproc.hsv_slide_tool(hsv_image) # + # # data and labels # X = df.iloc[:,1:] # y = df.iloc[:,0] # # split into train test sets # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75) # for c in np.unique(y).astype(np.int): # print(c) # X_c = X_train.iloc[:, c] # print(X_c) # # self._mean = X_c.groupby('') # P(A|B) = P(B|A) * P(A) / P(B) class NaiveBayes: # P(c|x) = P(x|c) * P(c) / P(x) # P(x|x) is the posterior probability # P(x|c) is the likelihood # P(c) is the class prior probability, or the prob of c occuring indpendently. 
# P(x) is the predictor prior probability, or the prob of x occuring independently def fit(self, features, target): # define class variables self.classes = np.unique(target) self.count = len(self.classes) self.feature_nums = features.shape[1] self.rows = features.shape[0] # calculate statistics for all those features self.calc_statistics(features, target) # prior is the random chance of drawing a particular class based on its proportion in the dataset self.prior = self.calc_prior(features, target) # print(f"self.prior = {self.prior}") # print(f"self.mean = {self.mean}") # print(f"self.var = {self.var}") def get_predictions(self, input_vector): predictions = [] for i in range(len(input_vector)): # print(f"input_vector {i}") result = self.calc_posterior((input_vector.iloc[i,:])) # print(f"result is {result}") predictions.append(result) return predictions def predict(self, observation): #call the calc_posterior function on the observation pred_class = self.calc_posterior(observation) return pred_class def calc_statistics(self, features, target): # calculate mean, variance for each column and convert to numpy array self.mean = features.groupby(target).apply(np.mean).to_numpy() self.var = features.groupby(target).apply(np.var).to_numpy() return self.mean, self.var def calc_prior(self, features, target): # this is the probability of picking one of a class at random from the dataset self.prior = (features.groupby(target).apply(lambda x: len(x)/self.rows).to_numpy()) return self.prior def calc_posterior(self, x): # this is the probability, post evidence # x is a numpy array # x is feature vector for one observation # make a list that we will add each classes posterior prob to posteriors = [] # iterate through the classes for i in range(0, self.count): # for each class look at the prior probability for the class prior = self.prior[i] # calculate the conditional probability for the conditional = np.sum(self.gaussian_density(i, x)) posterior = prior + conditional # print(f"i 
= {i}, prior = {prior}, conditional = {conditional}, posterior = {posterior}") posteriors.append(posterior) return self.classes[np.argmax(posteriors)] def gaussian_density(self, class_idx, x): # calc probability from gaussian denssityy fucntion (normal dist) mean = self.mean[class_idx] var = self.var[class_idx] # this part sucked and I had a typo that cost me hours numerator = np.exp(-((x-mean)**2 / (2 * var))) denominator = np.sqrt(2 * np.pi * var) return numerator / denominator def pdf(self, x, mean, stdev): # calculate probability density function exponent = np.exp(-((x-mean)**2 / (2*stdev**2))) return exponent * (1/(np.sqrt(2*np.pi)*stdev)) def get_ accuracy(self, test, predictions): correct = 0 for i in range(len(test)): if test.iloc[i] == predictions[i]: correct += 1 return (correct / float(len(test))) def train_model # data and labels X = df.iloc[:,1:] y = df.iloc[:,0] # split into train test sets X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75) # initialize the Naive Bayes class as an object nb = NaiveBayes() # sumnmarize the dataset to train the model # this gets class means, var, priors, etc nb.fit(X_train, y_train) # # # make predictions using the train set y_train_predictions = nb.get_predictions(X_train) acc = nb.get_accuracy(y_train, y_train_predictions) prec = precision_score(y_train, y_train_predictions, average="micro") rec = recall_score(y_train, y_train_predictions, average="micro") print(f"precision is {prec}, recall is {rec}, accuracy = {acc}") # confusion matrix labels = [(i, c) for i, c in labels_dict.items()] cm = confusion_matrix(y_train, y_train_predictions) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(cm) plt.title('confusion matrix of the classifier') fig.colorbar(cax) plt.xlabel('Predicted') plt.ylabel('True') plt.show() print(labels) # + # use the test set to see how we do y_test_predictions = nb.get_predictions(X_test) # scores acc = nb.get_accuracy(y_test, y_test_predictions) prec = 
precision_score(y_test, y_test_predictions, average="micro") rec = recall_score(y_test, y_test_predictions, average="micro") print(f"precision is {prec}, recall is {rec}, accuracy = {acc}") # confusion matrix labels = [(i, c) for i, c in labels_dict.items()] cm = confusion_matrix(y_test, y_test_predictions) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(cm) plt.title('confusion matrix of the classifier') fig.colorbar(cax) plt.xlabel('Predicted') plt.ylabel('True') plt.show() print(labels) # + # from sklearn.externals import joblib # # Save the model as a pickle in a file # joblib.dump(knn, 'filename.pkl') # # Load the model from the file # knn_from_joblib = joblib.load('filename.pkl') # # Use the loaded model to make predictions # knn_from_joblib.predict(X_test) # + df.head() hsv_image = cv.cvtColor(image, cv.COLOR_BGR2HSV) # create an hsv mask test_image = cv.inRange(hsv_image, (50, 20, 0), (160, 255, 255)).astype(np.uint8) test_image = cv.bitwise_and(image, image, mask =test_image).astype(np.uint8) print(test_image[0]) plt.imshow(test_image) # + # # import the cherry images # # C:\data\BSYSE_530\machine_vision\images\Cherries # # there are five, with different light conditions # # DSC_0052, 0054, 0056, 0057, 0058 # # we need to take these images and cut them into little pieces for the process to work # # convert them to RGB # images = [cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0052.jpg"), cv.COLOR_BGR2RGB), # cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0054.jpg"), cv.COLOR_BGR2RGB), # cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0056.jpg"), cv.COLOR_BGR2RGB), # cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0057.jpg"), cv.COLOR_BGR2RGB), # cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0058.jpg"), cv.COLOR_BGR2RGB)] # titles = ["DSC_0052", "DSC_0054", "DSC_0056","DSC_0057","DSC_0058"] # 
masked_images = [] # masks = [] # adj_images = [] # # # # image adjustment, rescale intensity # # for i in range(0, 5): # # img = images[i] # # p2, p98 = np.percentile(img, (2, 98)) # # adj_img = exposure.rescale_intensity(img, in_range=(p2, p98)) # # adj_images.append(adj_img) # # create the mask # # try to screen out all the white regions # background_mask = cv.inRange(images[0], # (70,70,90), # (120,120,120)).astype(np.int) * -1 # print(background_mask.shape) # print(type(background_mask)) # # background_mask = morphology.binary_dilation(background_mask, np.ones((3, 3))) # # closing # background_mask = morphology.closing(background_mask, morphology.disk(2)) # # print(background_mask.shape) # # print(background_mask) # # print(np.mean(images[0][650:700,400:500,0]), np.mean(images[0][600:700,0:100,1]), np.mean(images[0][600:700,0:100,2])) # # now use BGR2HSV to reverse the red and blue to make it easier for hsv filtering of the red (not around 0/360 break) # hsv_image = cv.cvtColor(images[0], cv.COLOR_BGR2HSV) # # create an hsv mask # cherry_mask = cv.inRange(hsv_image, # (70, 30, 20), # (255, 255, 255)).astype(np.int) # cherry_mask = get_tgi_mask(cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0056.jpg"), cv.COLOR_BGR2RGB).astype(np.float64)) # # make that array of truth values 0 or 255 into a 1 0 array # # cherry_mask = np.where(cherry_mask > 250, 1, 0).astype(np.int) # # median filter to denoise # # cherry_mask = ndimage.median_filter(cherry_mask, size=(3, 3)).astype(np.int) # # do a little dilation to make the mask look nice # cherry_mask = morphology.binary_dilation(cherry_mask, np.ones((3, 3))) # # closing # # cherry_mask = morphology.closing(cherry_mask, morphology.disk(4)) # # erode the mask # cherry_mask = morphology.erosion(cherry_mask, morphology.disk(2)) # #combine the cherry mask and the background mask # # cherry_mask = cherry_mask + background_mask # for image in images: # # apply the mask # masked_image = 
np.zeros(image.shape) # for channel in range(image.shape[2]): # masked_image[:,:,channel] = image[:,:,channel] * cherry_mask # # the images are going back into "BGR" but thats really RGB # masked_images.append(masked_image.astype(np.uint8)) # # # show the images from the last batch just for kicks # # plot_images(titles=["cherry_mask"], # # images=[cherry_mask], # # fsize=30) # # # show the images from the last batch just for kicks # plot_images(titles=titles, # images=masked_images, # fsize=30) # + # df = pd.DataFrame(columns=['y']) # # produce the individual images we are going to use for our data set in the neural network step # for light_level, img_rgb in enumerate(masked_images): # # create the image subsets and name them as appropriate for location # cherry_0_0 = img_rgb[100:200,200:300,:] # cherry_0_1 = img_rgb[80:180,300:400,:] # cherry_0_2 = img_rgb[90:190,375:475,:] # cherry_0_3 = img_rgb[100:200,500:600,:] # cherry_0_4 = img_rgb[100:200,600:700,:] # cherry_0_5 = img_rgb[100:200,700:800,:] # cherry_1_0 = img_rgb[225:325,190:290,:] # cherry_1_1 = img_rgb[225:325,275:375,:] # cherry_1_2 = img_rgb[225:325,375:475,:] # cherry_1_3 = img_rgb[225:325,500:600,:] # cherry_1_4 = img_rgb[225:325,600:700,:] # cherry_1_5 = img_rgb[225:325,700:800,:] # cherry_2_0 = img_rgb[375:475,175:275,:] # cherry_2_1 = img_rgb[375:475,275:375,:] # cherry_2_2 = img_rgb[375:475,375:475,:] # cherry_2_3 = img_rgb[375:475,500:600,:] # cherry_2_4 = img_rgb[375:475,600:700,:] # cherry_2_5 = img_rgb[375:475,700:800,:] # rectangle_0 = img_rgb[525:550,350:350 + 25,:] # rectangle_1 = img_rgb[525:550,382:382 + 25,:] # rectangle_2 = img_rgb[527:552,415:415 + 25,:] # rectangle_3 = img_rgb[527:552,450:450 + 25,:] # rectangle_4 = img_rgb[528:553,484:484 + 25,:] # rectangle_5 = img_rgb[528:553,519:519 + 25,:] # rectangle_6 = img_rgb[529:554,554:554 + 25,:] # sticky_note = img_rgb[250:430,800:1000,:] # images = [cherry_0_0, cherry_0_1, cherry_0_2, cherry_0_3, cherry_0_4, cherry_0_5, # cherry_1_0, 
cherry_1_1, cherry_1_2, cherry_1_3, cherry_1_4, cherry_1_5, # cherry_2_0, cherry_2_1, cherry_2_2, cherry_2_3, cherry_2_4, cherry_2_5, # rectangle_0, rectangle_1, rectangle_2, rectangle_3, rectangle_4, rectangle_5, # rectangle_6, sticky_note] # # labels = ["light_color_cherry", "light_color_cherry", "light_color_cherry", "light_color_cherry", "light_color_cherry", "light_color_cherry", # # "moderate_color_cherry", "moderate_color_cherry", "moderate_color_cherry", "moderate_color_cherry", "moderate_color_cherry", "moderate_color_cherry", # # "dark_color_cherry", "dark_color_cherry", "dark_color_cherry", "dark_color_cherry", "dark_color_cherry", "dark_color_cherry", # # "light_color_rectangle", "light_color_rectangle", "moderate_color_rectangle", "moderate_color_rectangle", "moderate_color_rectangle", "dark_color_rectangle", # # "dark_color_rectangle", "sticky_notes"] # labels = [0, 0, 0, 0, 0, 0, # 1, 1, 1, 1, 1, 1, # 2, 2, 2, 2, 2, 2, # 3, 3, 4, 4, 4, 5, 5, 6] # labels_dict = {0: "light_color_cherries", # 1: "moderate_color_cherries", # 2: "dark_color_cherries", # 3: "light_color_rectangles", # 4: "moderate_color_rectangles", # 5: "dark_color_rectangles", # 6: "sticky_notes"} # titles = ["cherry_0_0", "cherry_0_1", "cherry_0_2", "cherry_0_3", "cherry_0_4", "cherry_0_5", # "cherry_1_0", "cherry_1_1", "cherry_1_2", "cherry_1_3", "cherry_1_4", "cherry_1_5", # "cherry_2_0", "cherry_2_1", "cherry_2_2", "cherry_2_3", "cherry_2_4", "cherry_2_5", # "rectangle_0", "rectangle_1", "rectangle_2", "rectangle_3", "rectangle_4", "rectangle_5", # "rectangle_6", "sticky_note"] # # iterate through the zone of interest images # for i, image in enumerate(images): # # # set file name with light level and image title # # filename = str(labels[i]) + " " + titles[i] + "_" + str(light_level) + ".jpg" # # # resize all images to same size for later use # # bgr_image = cv.resize(image, (100,100), interpolation = cv.INTER_AREA) # # bgr_image = cv.cvtColor(image, cv.COLOR_RGB2BGR) # # 
cv.imwrite("cherries/" + filename, bgr_image) # # # do your dataset creation right here. # # hsv_image = cv.cvtColor(bgr_image, cv.COLOR_BGR2HSV) # # # p1, p2 = np.percentile(image[:,:,0], (2, 99)) # red_channel = exposure.rescale_intensity(image[:,:,0], in_range=(p1, p2)) # blue_channel = exposure.rescale_intensity(image[:,:,1], in_range=(p1, p2)) # green_channel = exposure.rescale_intensity(image[:,:,2], in_range=(p1, p2)) # test_image = image.astype(np.float64) # r = test_image[:,:,0] / np.max(test_image[:,:,0]) # g = test_image[:,:,1] / np.max(test_image[:,:,1]) # b = test_image[:,:,2] / np.max(test_image[:,:,2]) # # gli, ngrdi, r_bg, rbg, tgi*, br, rg # rg_index_labels = ["gli", "ngrdi", "r_bg", "rbg", "tgi", "br", "rg"] # rg_index = [calc_index(test_image, idx) for idx in rg_index_labels] # # get the binary mask for this image, convert to unsigned 8-bit int # bin_image = get_tgi_mask(image) # print(type(bin_image), bin_image.dtype) # contours, hier = cv.findContours(bin_image, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) # cnt = contours[0] # x, y, w, h = cv.boundingRect(cnt) # area = np.sum(bin_image) # cnt_area = cv.contourArea(cnt) # aspect_ratio = float(w)/h # rect_area = w * h # extent = float(cnt_area)/rect_area # hull = cv.convexHull(cnt) # hull_area = cv.contourArea(hull) # solidity = float(cnt_area)/hull_area # eq_diameter = np.sqrt(4*cnt_area/np.pi) # # try converting the images to pandas data frames, each of these channels and indices as a reshaped column. 
# # then use pandas data frame commands to get some values # df_images = pd.DataFrame() # df_images["r_rs"] = np.ndarray.flatten(red_channel) # df_images["b_rs"] = np.ndarray.flatten(green_channel) # df_images["g_rs"] = np.ndarray.flatten(blue_channel) # df_images["r"] = np.ndarray.flatten(r) # df_images["b"] = np.ndarray.flatten(g) # df_images["g"] = np.ndarray.flatten(b) # df_images["gli"] = np.ndarray.flatten(rg_index[0]) # df_images["ngrdi"] = np.ndarray.flatten(rg_index[1]) # df_images["r_bg"] = np.ndarray.flatten(rg_index[2]) # df_images["rbg"] = np.ndarray.flatten(rg_index[3]) # df_images["tgi"] = np.ndarray.flatten(rg_index[4]) # df_images["br"] = np.ndarray.flatten(rg_index[5]) # df_images["rg"] = np.ndarray.flatten(rg_index[6]) # df = df.append({'y' : labels[i], # 'mean_r_rs': df_images.r_rs[df_images.r_rs > 0].mean(), # 'mean_g_rs': df_images.g_rs[df_images.g_rs > 0].mean(), # 'mean_b_rs': df_images.b_rs[df_images.b_rs > 0].mean(), # 'area': area, # "cnt_area": cnt_area, # # "aspect_ratio": aspect_ratio, # # "rect_area": rect_area, # # "extent": extent, # # "hull_area": hull_area, # # "solidity": solidity, # # "eq_diameter": eq_diameter, # 'mean_r': df_images.r[df_images.r > 0].mean(), # 'mean_g': df_images.g[df_images.g > 0].mean(), # 'mean_b': df_images.b[df_images.b > 0].mean(), # 'gli': df_images.gli[df_images.gli < 0].mean(), # # 'ngrdi': df_images.ngrdi[df_images.ngrdi < 0].mean(), # 'r_bg': df_images.r_bg.mean(), # 'rbg': df_images.rbg.mean(), # 'tgi': df_images.tgi[df_images.tgi < 0].mean(), # 'br': df_images.br[df_images.br < 0].mean(), # 'rg': df_images.rg.mean() # }, ignore_index=True) # # show the images from the last batch just for kicks # plot_images(titles=rg_index_labels, # images=rg_index, # fsize=30) # for image in rg_index: # flat_img = np.ndarray.flatten(image) # print(flat_img.min(), flat_img.max()) # print(df) # + # # do a wacky thing here # # wacky_images = [exposure.equalize_hist(img[:,:,0]) for img in images] # # wacky_images = 
[exposure.equalize_adapthist(img[:,:,0]) for img in images] # test_image = cv.cvtColor(cv.imread("C:/data/BSYSE_530/machine_vision/images/Cherries/DSC_0052.jpg"), cv.COLOR_BGR2RGB).astype(np.float64) # r = test_image[:,:,0] / np.max(test_image[:,:,0]) # g = test_image[:,:,1] / np.max(test_image[:,:,1]) # b = test_image[:,:,2] / np.max(test_image[:,:,2]) # # gli, ngrdi, r_bg, rbg, tgi*, br, rg # rg_index_labels = ["gli", "ngrdi", "r_bg", "rbg", "tgi", "br", "rg"] # rg_index = [calc_index(test_image, idx) for idx in rg_index_labels] # # show the images from the last batch just for kicks # plot_images(titles=rg_index_labels, # images=rg_index, # fsize=15) # -
pylegoclassifier_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Source of the materials**: Biopython cookbook (adapted) # <font color='red'>Status: Draft</font> # Cluster analysis # ================ # # Cluster analysis is the grouping of items into clusters based on the # similarity of the items to each other. In bioinformatics, clustering is # widely used in gene expression data analysis to find groups of genes # with similar gene expression profiles. This may identify functionally # related genes, as well as suggest the function of presently unknown # genes. # # The Biopython module `Bio.Cluster` provides commonly used clustering # algorithms and was designed with the application to gene expression data # in mind. However, this module can also be used for cluster analysis of # other types of data. `Bio.Cluster` and the underlying C Clustering # Library is described by De Hoon *et al.* @dehoon2004. # # The following four clustering approaches are implemented in # `Bio.Cluster`: # # - Hierarchical clustering (pairwise centroid-, single-, complete-, and # average-linkage); # # - $k$-means, $k$-medians, and $k$-medoids clustering; # # - Self-Organizing Maps; # # - Principal Component Analysis. # # ### Data representation {#data-representation .unnumbered} # # The data to be clustered are represented by a $n \times m$ Numerical # Python array `data`. Within the context of gene expression data # clustering, typically the rows correspond to different genes whereas the # columns correspond to different experimental conditions. The clustering # algorithms in `Bio.Cluster` can be applied both to rows (genes) and to # columns (experiments). 
# # ### Missing values {#missing-values .unnumbered} # # Often in microarray experiments, some of the data values are missing, # which is indicated by an additional $n \times m$ Numerical Python # integer array `mask`. If `mask[i,j]==0`, then `data[i,j]` is missing and # is ignored in the analysis. # # ### Random number generator {#random-number-generator .unnumbered} # # The $k$-means/medians/medoids clustering algorithms and Self-Organizing # Maps (SOMs) include the use of a random number generator. The uniform # random number generator in `Bio.Cluster` is based on the algorithm by # L’Ecuyer @lecuyer1988, while random numbers following the binomial # distribution are generated using the BTPE algorithm by Kachitvichyanukul # and Schmeiser @kachitvichyanukul1988. The random number generator is # initialized automatically during its first call. As this random number # generator uses a combination of two multiplicative linear congruential # generators, two (integer) seeds are needed for initialization, for which # we use the system-supplied random number generator `rand` (in the C # standard library). We initialize this generator by calling `srand` with # the epoch time in seconds, and use the first two random numbers # generated by `rand` as seeds for the uniform random number generator in # `Bio.Cluster`. # # Distance functions {#sec:distancefunctions} # ------------------ # # In order to cluster items into groups based on their similarity, we # should first define what exactly we mean by *similar*. `Bio.Cluster` # provides eight distance functions, indicated by a single character, to # measure similarity, or conversely, distance: # # - `'e'`: Euclidean distance; # # - `'b'`: City-block distance. 
# # - `'c'`: Pearson correlation coefficient; # # - `'a'`: Absolute value of the Pearson correlation coefficient; # # - `'u'`: Uncentered Pearson correlation (equivalent to the cosine of # the angle between two data vectors); # # - `'x'`: Absolute uncentered Pearson correlation; # # - `'s'`: Spearman’s rank correlation; # # - `'k'`: Kendall’s $\tau$. # # The first two are true distance functions that satisfy the triangle # inequality: # $$d\left(\underline{u},\underline{v}\right) \leq d\left(\underline{u},\underline{w}\right) + d\left(\underline{w},\underline{v}\right) \textrm{ for all } \underline{u}, \underline{v}, \underline{w},$$ # and are therefore referred to as *metrics*. In everyday language, this # means that the shortest distance between two points is a straight line. # # The remaining six distance measures are related to the correlation # coefficient, where the distance $d$ is defined in terms of the # correlation $r$ by $d=1-r$. Note that these distance functions are # *semi-metrics* that do not satisfy the triangle inequality. For example, # for $$\underline{u}=\left(1,0,-1\right);$$ # $$\underline{v}=\left(1,1,0\right);$$ # $$\underline{w}=\left(0,1,1\right);$$ we find a Pearson distance # $d\left(\underline{u},\underline{w}\right) = 1.8660$, while # $d\left(\underline{u},\underline{v}\right)+d\left(\underline{v},\underline{w}\right) = 1.6340$. # # ### Euclidean distance {#euclidean-distance .unnumbered} # # In `Bio.Cluster`, we define the Euclidean distance as # $$d = {1 \over n} \sum_{i=1}^{n} \left(x_i-y_i\right)^{2}.$$ Only those # terms are included in the summation for which both $x_i$ and $y_i$ are # present, and the denominator $n$ is chosen accordingly. As the # expression data $x_i$ and $y_i$ are subtracted directly from each other, # we should make sure that the expression data are properly normalized # when using the Euclidean distance.
# # ### City-block distance {#city-block-distance .unnumbered} # # The city-block distance, alternatively known as the Manhattan distance, # is related to the Euclidean distance. Whereas the Euclidean distance # corresponds to the length of the shortest path between two points, the # city-block distance is the sum of distances along each dimension. As # gene expression data tend to have missing values, in `Bio.Cluster` we # define the city-block distance as the sum of distances divided by the # number of dimensions: # $$d = {1 \over n} \sum_{i=1}^n \left|x_i-y_i\right|.$$ This is equal to # the distance you would have to walk between two points in a city, where # you have to walk along city blocks. As for the Euclidean distance, the # expression data are subtracted directly from each other, and we should # therefore make sure that they are properly normalized. # # ### The Pearson correlation coefficient {#the-pearson-correlation-coefficient .unnumbered} # # The Pearson correlation coefficient is defined as # $$r = \frac{1}{n} \sum_{i=1}^n \left( \frac{x_i -\bar{x}}{\sigma_x} \right) \left(\frac{y_i -\bar{y}}{\sigma_y} \right),$$ # in which $\bar{x}, \bar{y}$ are the sample mean of $x$ and $y$ # respectively, and $\sigma_x, \sigma_y$ are the sample standard deviation # of $x$ and $y$. The Pearson correlation coefficient is a measure for how # well a straight line can be fitted to a scatterplot of $x$ and $y$. If # all the points in the scatterplot lie on a straight line, the Pearson # correlation coefficient is either +1 or -1, depending on whether the # slope of line is positive or negative. If the Pearson correlation # coefficient is equal to zero, there is no correlation between $x$ and # $y$. # # The *Pearson distance* is then defined as # $$d_{\textrm{P}} \equiv 1 - r.$$ As the Pearson correlation coefficient # lies between -1 and 1, the Pearson distance lies between 0 and 2. 
# # ### Absolute Pearson correlation {#absolute-pearson-correlation .unnumbered} # # By taking the absolute value of the Pearson correlation, we find a # number between 0 and 1. If the absolute value is 1, all the points in # the scatter plot lie on a straight line with either a positive or a # negative slope. If the absolute value is equal to zero, there is no # correlation between $x$ and $y$. # # The corresponding distance is defined as # $$d_{\textrm A} \equiv 1 - \left|r\right|,$$ where $r$ is the Pearson # correlation coefficient. As the absolute value of the Pearson # correlation coefficient lies between 0 and 1, the corresponding distance # lies between 0 and 1 as well. # # In the context of gene expression experiments, the absolute correlation # is equal to 1 if the gene expression profiles of two genes are either # exactly the same or exactly opposite. The absolute correlation # coefficient should therefore be used with care. # # ### Uncentered correlation (cosine of the angle) {#uncentered-correlation-cosine-of-the-angle .unnumbered} # # In some cases, it may be preferable to use the *uncentered correlation* # instead of the regular Pearson correlation coefficient. The uncentered # correlation is defined as # $$r_{\textrm U} = \frac{1}{n} \sum_{i=1}^{n} \left(\frac{x_i}{\sigma_x^{(0)}} \right) \left(\frac{y_i}{\sigma_y^{(0)}} \right),$$ # where $$\begin{aligned} # \sigma_x^{(0)} & = & \sqrt{{\frac{1}{n}} \sum_{i=1}^{n}x_i^2}; \nonumber \\ # \sigma_y^{(0)} & = & \sqrt{{\frac{1}{n}} \sum_{i=1}^{n}y_i^2}. \nonumber\end{aligned}$$ # This is the same expression as for the regular Pearson correlation # coefficient, except that the sample means $\bar{x}, \bar{y}$ are set # equal to zero. The uncentered correlation may be appropriate if there is # a zero reference state. 
For instance, in the case of gene expression # data given in terms of log-ratios, a log-ratio equal to zero corresponds # to the green and red signal being equal, which means that the # experimental manipulation did not affect the gene expression. # # The distance corresponding to the uncentered correlation coefficient is # defined as $$d_{\mbox{U}} \equiv 1 - r_{\mbox{U}},$$ where # $r_{\mbox{U}}$ is the uncentered correlation. As the uncentered # correlation coefficient lies between -1 and 1, the corresponding # distance lies between 0 and 2. # # The uncentered correlation is equal to the cosine of the angle of the # two data vectors in $n$-dimensional space, and is often referred to as # such. # # ### Absolute uncentered correlation {#absolute-uncentered-correlation .unnumbered} # # As for the regular Pearson correlation, we can define a distance measure # using the absolute value of the uncentered correlation: # $$d_{\mbox{AU}} \equiv 1 - \left|r_{\mbox{U}}\right|,$$ where # $r_{\mbox{U}}$ is the uncentered correlation coefficient. As the # absolute value of the uncentered correlation coefficient lies between 0 # and 1, the corresponding distance lies between 0 and 1 as well. # # Geometrically, the absolute value of the uncentered correlation is equal # to the cosine between the supporting lines of the two data vectors # (i.e., the angle without taking the direction of the vectors into # consideration). # # ### Spearman rank correlation {#spearman-rank-correlation .unnumbered} # # The Spearman rank correlation is an example of a non-parametric # similarity measure, and tends to be more robust against outliers than # the Pearson correlation. # # To calculate the Spearman rank correlation, we replace each data value # by their rank if we would order the data in each vector by their value. # We then calculate the Pearson correlation between the two rank vectors # instead of the data vectors. 
# # As in the case of the Pearson correlation, we can define a distance # measure corresponding to the Spearman rank correlation as # $$d_{\mbox{S}} \equiv 1 - r_{\mbox{S}},$$ where $r_{\mbox{S}}$ is the # Spearman rank correlation. # # ### Kendall’s $\tau$ {#kendalls-tau .unnumbered} # # Kendall’s $\tau$ is another example of a non-parametric similarity # measure. It is similar to the Spearman rank correlation, but instead of # the ranks themselves only the relative ranks are used to calculate # $\tau$ (see Snedecor & Cochran @snedecor1989). # # We can define a distance measure corresponding to Kendall’s $\tau$ as # $$d_{\mbox{K}} \equiv 1 - \tau.$$ As Kendall’s $\tau$ is always between # -1 and 1, the corresponding distance will be between 0 and 2. # # ### Weighting {#weighting .unnumbered} # # For most of the distance functions available in `Bio.Cluster`, a weight # vector can be applied. The weight vector contains weights for the items # in the data vector. If the weight for item $i$ is $w_i$, then that item # is treated as if it occurred $w_i$ times in the data. The weights do not # have to be integers. For the Spearman rank correlation and Kendall’s # $\tau$, weights do not have a well-defined meaning and are therefore not # implemented. # # ### Calculating the distance matrix {#subsec:distancematrix .unnumbered} # # The distance matrix is a square matrix with all pairwise distances # between the items in `data`, and can be calculated by the function # `distancematrix` in the `Bio.Cluster` module: # # from Bio.Cluster import distancematrix matrix = distancematrix(data) # # where the following arguments are defined: # # - `data` (required)\ # Array containing the data for the items. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. If `mask==None`, then all data # are present. # # - `weight` (default: `None`)\ # The weights to be used when calculating distances.
If # `weight==None`, then equal weights are assumed. # # - `transpose` (default: `0`)\ # Determines if the distances between the rows of `data` are to be # calculated (`transpose==0`), or between the columns of `data` # (`transpose==1`). # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # To save memory, the distance matrix is returned as a list of 1D arrays. # The number of columns in each row is equal to the row number. Hence, the # first row has zero elements. An example of the return value is # # # ``` # [array([]), # array([1.]), # array([7., 3.]), # array([4., 2., 6.])] # # ``` # # This corresponds to the distance matrix $$\left( # \begin{array}{cccc} # 0 & 1 & 7 & 4 \\ # 1 & 0 & 3 & 2 \\ # 7 & 3 & 0 & 6 \\ # 4 & 2 & 6 & 0 # \end{array} # \right).$$ # # Calculating cluster properties # ------------------------------ # # ### Calculating the cluster centroids {#subsec:clustercentroids .unnumbered} # # The centroid of a cluster can be defined either as the mean or as the # median of each dimension over all cluster items. The function # `clustercentroids` in `Bio.Cluster` can be used to calculate either: # # from Bio.Cluster import clustercentroids cdata, cmask = clustercentroids(data) # # where the following arguments are defined: # # - `data` (required)\ # Array containing the data for the items. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. If `mask==None`, then all data # are present. # # - `clusterid` (default: `None`)\ # Vector of integers showing to which cluster each item belongs. If # `clusterid` is `None`, then all items are assumed to belong to the # same cluster. # # - `method` (default: `'a'`)\ # Specifies whether the arithmetic mean (`method=='a'`) or the median # (`method=='m'`) is used to calculate the cluster center. 
# # - `transpose` (default: `0`)\ # Determines if the centroids of the rows of `data` are to be # calculated (`transpose==0`), or the centroids of the columns of # `data` (`transpose==1`). # # This function returns the tuple `(cdata, cmask)`. The centroid data are # stored in the 2D Numerical Python array `cdata`, with missing data # indicated by the 2D Numerical Python integer array `cmask`. The # dimensions of these arrays are # $\left(\textrm{number of clusters}, \textrm{number of columns}\right)$ # if `transpose` is `0`, or # $\left(\textrm{number of rows}, \textrm{number of clusters}\right)$ if # `transpose` is `1`. Each row (if `transpose` is `0`) or column (if # `transpose` is `1`) contains the averaged data corresponding to the # centroid of each cluster. # # ### Calculating the distance between clusters {#calculating-the-distance-between-clusters .unnumbered} # # Given a distance function between *items*, we can define the distance # between two *clusters* in several ways. The distance between the # arithmetic means of the two clusters is used in pairwise # centroid-linkage clustering and in $k$-means clustering. In $k$-medoids # clustering, the distance between the medians of the two clusters is used # instead. The shortest pairwise distance between items of the two # clusters is used in pairwise single-linkage clustering, while the # longest pairwise distance is used in pairwise maximum-linkage # clustering. In pairwise average-linkage clustering, the distance between # two clusters is defined as the average over the pairwise distances. # # To calculate the distance between two clusters, use # # from Bio.Cluster import clusterdistance distance = clusterdistance(data) # # where the following arguments are defined: # # - `data` (required)\ # Array containing the data for the items. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. 
If `mask==None`, then all data # are present. # # - `weight` (default: `None`)\ # The weights to be used when calculating distances. If # `weight==None`, then equal weights are assumed. # # - `index1` (default: `0`)\ # A list containing the indices of the items belonging to the # first cluster. A cluster containing only one item $i$ can be # represented either as a list `[i]`, or as an integer `i`. # # - `index2` (default: `0`)\ # A list containing the indices of the items belonging to the # second cluster. A cluster containing only one item $i$ can be # represented either as a list `[i]`, or as an integer `i`. # # - `method` (default: `'a'`)\ # Specifies how the distance between clusters is defined: # # - `'a'`: Distance between the two cluster centroids (arithmetic # mean); # # - `'m'`: Distance between the two cluster centroids (median); # # - `'s'`: Shortest pairwise distance between items in the two # clusters; # # - `'x'`: Longest pairwise distance between items in the two # clusters; # # - `'v'`: Average over the pairwise distances between items in the # two clusters. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # - `transpose` (default: `0`)\ # If `transpose==0`, calculate the distance between the rows of # `data`. If `transpose==1`, calculate the distance between the # columns of `data`. # # Partitioning algorithms # ----------------------- # # Partitioning algorithms divide items into $k$ clusters such that the sum # of distances over the items to their cluster centers is minimal. The # number of clusters $k$ is specified by the user. Three partitioning # algorithms are available in `Bio.Cluster`: # # - $k$-means clustering # # - $k$-medians clustering # # - $k$-medoids clustering # # These algorithms differ in how the cluster center is defined. In # $k$-means clustering, the cluster center is defined as the mean data # vector averaged over all items in the cluster.
Instead of the mean, in # $k$-medians clustering the median is calculated for each dimension in # the data vector. Finally, in $k$-medoids clustering the cluster center # is defined as the item which has the smallest sum of distances to the # other items in the cluster. This clustering algorithm is suitable for # cases in which the distance matrix is known but the original data matrix # is not available, for example when clustering proteins based on their # structural similarity. # # The expectation-maximization (EM) algorithm is used to find this # partitioning into $k$ groups. In the initialization of the EM algorithm, # we randomly assign items to clusters. To ensure that no empty clusters # are produced, we use the binomial distribution to randomly choose the # number of items in each cluster to be one or more. We then randomly # permute the cluster assignments to items such that each item has an # equal probability to be in any cluster. Each cluster is thus guaranteed # to contain at least one item. # # We then iterate: # # - Calculate the centroid of each cluster, defined as either the mean, # the median, or the medoid of the cluster; # # - Calculate the distances of each item to the cluster centers; # # - For each item, determine which cluster centroid is closest; # # - Reassign each item to its closest cluster, or stop the iteration if # no further item reassignments take place. # # To avoid clusters becoming empty during the iteration, in $k$-means and # $k$-medians clustering the algorithm keeps track of the number of items # in each cluster, and prohibits the last remaining item in a cluster from # being reassigned to a different cluster. For $k$-medoids clustering, # such a check is not needed, as the item that functions as the cluster # centroid has a zero distance to itself, and will therefore never be # closer to a different cluster. 
# # As the initial assignment of items to clusters is done randomly, usually # a different clustering solution is found each time the EM algorithm is # executed. To find the optimal clustering solution, the $k$-means # algorithm is repeated many times, each time starting from a different # initial random clustering. The sum of distances of the items to their # cluster center is saved for each run, and the solution with the smallest # value of this sum will be returned as the overall clustering solution. # # How often the EM algorithm should be run depends on the number of items # being clustered. As a rule of thumb, we can consider how often the # optimal solution was found; this number is returned by the partitioning # algorithms as implemented in this library. If the optimal solution was # found many times, it is unlikely that better solutions exist than the # one that was found. However, if the optimal solution was found only # once, there may well be other solutions with a smaller within-cluster # sum of distances. If the number of items is large (more than several # hundreds), it may be difficult to find the globally optimal solution. # # The EM algorithm terminates when no further reassignments take place. We # noticed that for some sets of initial cluster assignments, the EM # algorithm fails to converge due to the same clustering solution # reappearing periodically after a small number of iteration steps. We # therefore check for the occurrence of such periodic solutions during the # iteration. After a given number of iteration steps, the current # clustering result is saved as a reference. By comparing the clustering # result after each subsequent iteration step to the reference state, we # can determine if a previously encountered clustering result is found. In # such a case, the iteration is halted. 
If after a given number of # iterations the reference state has not yet been encountered, the current # clustering solution is saved to be used as the new reference state. # Initially, ten iteration steps are executed before resaving the # reference state. This number of iteration steps is doubled each time, to # ensure that periodic behavior with longer periods can also be detected. # # ### $k$-means and $k$-medians {#k-means-and-k-medians .unnumbered} # # The $k$-means and $k$-medians algorithms are implemented as the function # `kcluster` in `Bio.Cluster`: # # from Bio.Cluster import kcluster clusterid, error, nfound = kcluster(data) # # where the following arguments are defined: # # - `data` (required)\ # Array containing the data for the items. # # - `nclusters` (default: `2`)\ # The number of clusters $k$. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. If `mask==None`, then all data # are present. # # - `weight` (default: `None`)\ # The weights to be used when calculating distances. If # `weight==None`, then equal weights are assumed. # # - `transpose` (default: `0`)\ # Determines if rows (`transpose` is `0`) or columns (`transpose` is # `1`) are to be clustered. # # - `npass` (default: `1`)\ # The number of times the $k$-means/-medians clustering algorithm is # performed, each time with a different (random) initial condition. If # `initialid` is given, the value of `npass` is ignored and the # clustering algorithm is run only once, as it behaves # deterministically in that case. # # - `method` (default: `a`)\ # describes how the center of a cluster is found: # # - `method=='a'`: arithmetic mean ($k$-means clustering); # # - `method=='m'`: median ($k$-medians clustering). # # For other values of `method`, the arithmetic mean is used. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). 
Whereas all eight distance measures # are accepted by `kcluster`, from a theoretical viewpoint it is best # to use the Euclidean distance for the $k$-means algorithm, and the # city-block distance for $k$-medians. # # - `initialid` (default: `None`)\ # Specifies the initial clustering to be used for the EM algorithm. If # `initialid==None`, then a different random initial clustering is # used for each of the `npass` runs of the EM algorithm. If # `initialid` is not `None`, then it should be equal to a 1D array # containing the cluster number (between `0` and `nclusters-1`) for # each item. Each cluster should contain at least one item. With the # initial clustering specified, the EM algorithm is deterministic. # # This function returns a tuple `(clusterid, error, nfound)`, where # `clusterid` is an integer array containing the number of the cluster to # which each row or cluster was assigned, `error` is the within-cluster # sum of distances for the optimal clustering solution, and `nfound` is # the number of times this optimal solution was found. 
# # ### $k$-medoids clustering {#k-medoids-clustering .unnumbered} # # The `kmedoids` routine performs $k$-medoids clustering on a given set of # items, using the distance matrix and the number of clusters passed by # the user: # # from Bio.Cluster import kmedoids clusterid, error, nfound = kmedoids(distance) # # where the following arguments are defined: # # - `distance` (required)\ # The matrix containing the distances between the items; this matrix # can be specified in three ways: # # - as a 2D Numerical Python array (in which only the left-lower # part of the array will be accessed): # # # ``` # distance = array([[0.0, 1.1, 2.3], # [1.1, 0.0, 4.5], # [2.3, 4.5, 0.0]]) # # ``` # # - as a 1D Numerical Python array containing consecutively the # distances in the left-lower part of the distance matrix: # # # ``` # distance = array([1.1, 2.3, 4.5]) # # ``` # # - as a list containing the rows of the left-lower part of the # distance matrix: # # # ``` # distance = [array([]), # array([1.1]), # array([2.3, 4.5]) # ] # # ``` # # These three expressions correspond to the same distance matrix. # # - `nclusters` (default: `2`)\ # The number of clusters $k$. # # - `npass` (default: `1`)\ # The number of times the $k$-medoids clustering algorithm is # performed, each time with a different (random) initial condition. If # `initialid` is given, the value of `npass` is ignored, as the # clustering algorithm behaves deterministically in that case. # # - `initialid` (default: `None`)\ # Specifies the initial clustering to be used for the EM algorithm. If # `initialid==None`, then a different random initial clustering is # used for each of the `npass` runs of the EM algorithm. If # `initialid` is not `None`, then it should be equal to a 1D array # containing the cluster number (between `0` and `nclusters-1`) for # each item. Each cluster should contain at least one item. 
With the # initial clustering specified, the EM algorithm is deterministic. # # This function returns a tuple `(clusterid, error, nfound)`, where # `clusterid` is an array containing the number of the cluster to which # each item was assigned, `error` is the within-cluster sum of distances # for the optimal $k$-medoids clustering solution, and `nfound` is the # number of times the optimal solution was found. Note that the cluster # number in `clusterid` is defined as the item number of the item # representing the cluster centroid. # # Hierarchical clustering # ----------------------- # # Hierarchical clustering methods are inherently different from the # $k$-means clustering method. In hierarchical clustering, the similarity # in the expression profile between genes or experimental conditions are # represented in the form of a tree structure. This tree structure can be # shown graphically by programs such as Treeview and Java Treeview, which # has contributed to the popularity of hierarchical clustering in the # analysis of gene expression data. # # The first step in hierarchical clustering is to calculate the distance # matrix, specifying all the distances between the items to be clustered. # Next, we create a node by joining the two closest items. Subsequent # nodes are created by pairwise joining of items or nodes based on the # distance between them, until all items belong to the same node. A tree # structure can then be created by retracing which items and nodes were # merged. Unlike the EM algorithm, which is used in $k$-means clustering, # the complete process of hierarchical clustering is deterministic. # # Several flavors of hierarchical clustering exist, which differ in how # the distance between subnodes is defined in terms of their members. In # `Bio.Cluster`, pairwise single, maximum, average, and centroid linkage # are available. 
# # - In pairwise single-linkage clustering, the distance between two # nodes is defined as the shortest distance among the pairwise # distances between the members of the two nodes. # # - In pairwise maximum-linkage clustering, alternatively known as # pairwise complete-linkage clustering, the distance between two nodes # is defined as the longest distance among the pairwise distances # between the members of the two nodes. # # - In pairwise average-linkage clustering, the distance between two # nodes is defined as the average over all pairwise distances between # the items of the two nodes. # # - In pairwise centroid-linkage clustering, the distance between two # nodes is defined as the distance between their centroids. The # centroids are calculated by taking the mean over all the items in # a cluster. As the distance from each newly formed node to existing # nodes and items need to be calculated at each step, the computing # time of pairwise centroid-linkage clustering may be significantly # longer than for the other hierarchical clustering methods. Another # peculiarity is that (for a distance measure based on the Pearson # correlation), the distances do not necessarily increase when going # up in the clustering tree, and may even decrease. This is caused by # an inconsistency between the centroid calculation and the distance # calculation when using the Pearson correlation: Whereas the Pearson # correlation effectively normalizes the data for the distance # calculation, no such normalization occurs for the # centroid calculation. # # For pairwise single-, complete-, and average-linkage clustering, the # distance between two nodes can be found directly from the distances # between the individual items. Therefore, the clustering algorithm does # not need access to the original gene expression data, once the distance # matrix is known. 
For pairwise centroid-linkage clustering, however, the # centroids of newly formed subnodes can only be calculated from the # original data and not from the distance matrix. # # The implementation of pairwise single-linkage hierarchical clustering is # based on the SLINK algorithm (Sibson, 1973), which is much faster and # more memory-efficient than a straightforward implementation of pairwise # single-linkage clustering. The clustering result produced by this # algorithm is identical to the clustering solution found by the # conventional single-linkage algorithm. The single-linkage hierarchical # clustering algorithm implemented in this library can be used to cluster # large gene expression data sets, for which conventional hierarchical # clustering algorithms fail due to excessive memory requirements and # running time. # # ### Representing a hierarchical clustering solution {#representing-a-hierarchical-clustering-solution .unnumbered} # # The result of hierarchical clustering consists of a tree of nodes, in # which each node joins two items or subnodes. Usually, we are not only # interested in which items or subnodes are joined at each node, but also # in their similarity (or distance) as they are joined. To store one node # in the hierarchical clustering tree, we make use of the class `Node`, # which is defined in `Bio.Cluster`. An instance of `Node` has three # attributes: # # - `left` # # - `right` # # - `distance` # # Here, `left` and `right` are integers referring to the two items or # subnodes that are joined at this node, and `distance` is the distance # between them. The items being clustered are numbered from 0 to # $\left(\textrm{number of items} - 1\right)$, while clusters are numbered # from -1 to $-\left(\textrm{number of items}-1\right)$. Note that the # number of nodes is one less than the number of items. # # To create a new `Node` object, we need to specify `left` and `right`; # `distance` is optional. 
# # from Bio.Cluster import Node Node(2, 3) Node(2, 3, 0.91) # # The attributes `left`, `right`, and `distance` of an existing `Node` # object can be modified directly: # # node = Node(4, 5) node.left = 6 node.right = 2 node.distance = 0.73 node # # An error is raised if `left` and `right` are not integers, or if # `distance` cannot be converted to a floating-point value. # # The Python class `Tree` represents a full hierarchical clustering # solution. A `Tree` object can be created from a list of `Node` objects: # # from Bio.Cluster import Node, Tree nodes = [Node(1, 2, 0.2), Node(0, 3, 0.5), Node(-2, 4, 0.6), Node(-1, -3, 0.9)] tree = Tree(nodes) print(tree) # # The `Tree` initializer checks if the list of nodes is a valid # hierarchical clustering result: # # nodes = [Node(1, 2, 0.2), Node(0, 2, 0.5)] try: Tree(nodes) raise Exception("Should not arrive here") except ValueError: print("This tree is problematic") # # Individual nodes in a `Tree` object can be accessed using square # brackets: # # nodes = [Node(1, 2, 0.2), Node(0, -1, 0.5)] tree = Tree(nodes) tree[0] tree[1] tree[-1] # # As a `Tree` object is read-only, we cannot change individual nodes in a # `Tree` object. However, we can convert the tree to a list of nodes, # modify this list, and create a new tree from this list: # # tree = Tree([Node(1, 2, 0.1), Node(0, -1, 0.5), Node(-2, 3, 0.9)]) print(tree) nodes = tree[:] nodes[0] = Node(0, 1, 0.2) nodes[1].left = 2 tree = Tree(nodes) print(tree) # # This guarantees that any `Tree` object is always well-formed. # # To display a hierarchical clustering solution with visualization # programs such as Java Treeview, it is better to scale all node distances # such that they are between zero and one. This can be accomplished by # calling the `scale` method on an existing `Tree` object: # # tree.scale() # # This method takes no arguments, and returns `None`. 
# # After hierarchical clustering, the items can be grouped into $k$ # clusters based on the tree structure stored in the `Tree` object by # cutting the tree: # # clusterid = tree.cut(nclusters=1) # # where `nclusters` (defaulting to `1`) is the desired number of clusters # $k$. This method ignores the top $k-1$ linking events in the tree # structure, resulting in $k$ separated clusters of items. The number of # clusters $k$ should be positive, and less than or equal to the number of # items. This method returns an array `clusterid` containing the number of # the cluster to which each item is assigned. # # ### Performing hierarchical clustering {#performing-hierarchical-clustering .unnumbered} # # To perform hierarchical clustering, use the `treecluster` function in # `Bio.Cluster`. # # from Bio.Cluster import treecluster tree = treecluster(data) # # where the following arguments are defined: # # - `data`\ # Array containing the data for the items. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. If `mask==None`, then all data # are present. # # - `weight` (default: `None`)\ # The weights to be used when calculating distances. If # `weight==None`, then equal weights are assumed. # # - `transpose` (default: `0`)\ # Determines if rows (`transpose==0`) or columns (`transpose==1`) are # to be clustered. # # - `method` (default: `'m'`)\ # defines the linkage method to be used: # # - `method=='s'`: pairwise single-linkage clustering # # - `method=='m'`: pairwise maximum- (or complete-) linkage # clustering # # - `method=='c'`: pairwise centroid-linkage clustering # # - `method=='a'`: pairwise average-linkage clustering # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). 
# # To apply hierarchical clustering on a precalculated distance matrix, # specify the `distancematrix` argument when calling the `treecluster` # function instead of the `data` argument: # # from Bio.Cluster import treecluster tree = treecluster(distancematrix=distance) # # In this case, the following arguments are defined: # # - `distancematrix`\ # The distance matrix, which can be specified in three ways: # # - as a 2D Numerical Python array (in which only the left-lower # part of the array will be accessed): # # # ``` # distance = array([[0.0, 1.1, 2.3], # [1.1, 0.0, 4.5], # [2.3, 4.5, 0.0]]) # # ``` # # - as a 1D Numerical Python array containing consecutively the # distances in the left-lower part of the distance matrix: # # # ``` # distance = array([1.1, 2.3, 4.5]) # # ``` # # - as a list containing the rows of the left-lower part of the # distance matrix: # # # ``` # distance = [array([]), # array([1.1]), # array([2.3, 4.5]) # ] # # ``` # # These three expressions correspond to the same distance matrix. As # `treecluster` may shuffle the values in the distance matrix as part # of the clustering algorithm, be sure to save this array in a # different variable before calling `treecluster` if you need # it later. # # - `method`\ # The linkage method to be used: # # - `method=='s'`: pairwise single-linkage clustering # # - `method=='m'`: pairwise maximum- (or complete-) linkage # clustering # # - `method=='a'`: pairwise average-linkage clustering # # While pairwise single-, maximum-, and average-linkage clustering can # be calculated from the distance matrix alone, pairwise # centroid-linkage cannot. # # When calling `treecluster`, either `data` or `distancematrix` should be # `None`. # # This function returns a `Tree` object. This object contains # $\left(\textrm{number of items} - 1\right)$ nodes, where the number of # items is the number of rows if rows were clustered, or the number of # columns if columns were clustered. 
Each node describes a pairwise # linking event, where the node attributes `left` and `right` each contain # the number of one item or subnode, and `distance` the distance between # them. Items are numbered from 0 to # $\left(\textrm{number of items} - 1\right)$, while clusters are numbered # -1 to $-\left(\textrm{number of items}-1\right)$. # # Self-Organizing Maps # -------------------- # # Self-Organizing Maps (SOMs) were invented by Kohonen to describe neural # networks (see for instance Kohonen, 1997 @kohonen1997). Tamayo (1999) # first applied Self-Organizing Maps to gene expression data @tamayo1999. # # SOMs organize items into clusters that are situated in some topology. # Usually a rectangular topology is chosen. The clusters generated by SOMs # are such that neighboring clusters in the topology are more similar to # each other than clusters far from each other in the topology. # # The first step to calculate a SOM is to randomly assign a data vector to # each cluster in the topology. If rows are being clustered, then the # number of elements in each data vector is equal to the number of # columns. # # An SOM is then generated by taking rows one at a time, and finding which # cluster in the topology has the closest data vector. The data vector of # that cluster, as well as those of the neighboring clusters, are adjusted # using the data vector of the row under consideration. The adjustment is # given by # $$\Delta \underline{x}_{\textrm{cell}} = \tau \cdot \left(\underline{x}_{\textrm{row}} - \underline{x}_{\textrm{cell}} \right).$$ # The parameter $\tau$ is a parameter that decreases at each iteration # step. We have used a simple linear function of the iteration step: # $$\tau = \tau_{\textrm{init}} \cdot \left(1 - {i \over n}\right),$$ # $\tau_{\textrm{init}}$ is the initial value of $\tau$ as specified by # the user, $i$ is the number of the current iteration step, and $n$ is # the total number of iteration steps to be performed. 
While changes are # made rapidly in the beginning of the iteration, at the end of iteration # only small changes are made. # # All clusters within a radius $R$ are adjusted to the gene under # consideration. This radius decreases as the calculation progresses as # $$R = R_{\textrm{max}} \cdot \left(1 - {i \over n}\right),$$ in which # the maximum radius is defined as # $$R_{\textrm{max}} = \sqrt{N_x^2 + N_y^2},$$ where # $\left(N_x, N_y\right)$ are the dimensions of the rectangle defining the # topology. # # The function `somcluster` implements the complete algorithm to calculate # a Self-Organizing Map on a rectangular grid. First it initializes the # random number generator. The node data are then initialized using the # random number generator. The order in which genes or microarrays are # used to modify the SOM is also randomized. The total number of # iterations in the SOM algorithm is specified by the user. # # To run `somcluster`, use # # from Bio.Cluster import somcluster clusterid, celldata = somcluster(data) # # where the following arguments are defined: # # - `data` (required)\ # Array containing the data for the items. # # - `mask` (default: `None`)\ # Array of integers showing which data are missing. If `mask[i,j]==0`, # then `data[i,j]` is missing. If `mask==None`, then all data # are present. # # - `weight` (default: `None`)\ # contains the weights to be used when calculating distances. If # `weight==None`, then equal weights are assumed. # # - `transpose` (default: `0`)\ # Determines if rows (`transpose` is `0`) or columns (`transpose` is # `1`) are to be clustered. # # - `nxgrid, nygrid` (default: `2, 1`)\ # The number of cells horizontally and vertically in the rectangular # grid on which the Self-Organizing Map is calculated. # # - `inittau` (default: `0.02`)\ # The initial value for the parameter $\tau$ that is used in the # SOM algorithm. The default value for `inittau` is 0.02, which was # used in Michael Eisen’s Cluster/TreeView program. 
# # - `niter` (default: `1`)\ # The number of iterations to be performed. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # This function returns the tuple `(clusterid, celldata)`: # # - `clusterid`:\ # An array with two columns, where the number of rows is equal to the # number of items that were clustered. Each row contains the $x$ and # $y$ coordinates of the cell in the rectangular SOM grid to which the # item was assigned. # # - `celldata`:\ # An array with dimensions # $\left(\verb|nxgrid|, \verb|nygrid|, \textrm{number of columns}\right)$ # if rows are being clustered, or # $\left(\verb|nxgrid|, \verb|nygrid|, \textrm{number of rows}\right)$ # if columns are being clustered. Each element `[ix][iy]` of this # array is a 1D vector containing the gene expression data for the # centroid of the cluster in the grid cell with coordinates # `[ix][iy]`. # # Principal Component Analysis # ---------------------------- # # Principal Component Analysis (PCA) is a widely used technique for # analyzing multivariate data. A practical example of applying Principal # Component Analysis to gene expression data is presented by Yeung and # Ruzzo (2001) @yeung2001. # # In essence, PCA is a coordinate transformation in which each row in the # data matrix is written as a linear sum over basis vectors called # principal components, which are ordered and chosen such that each # maximally explains the remaining variance in the data vectors. For # example, an $n \times 3$ data matrix can be represented as an # ellipsoidal cloud of $n$ points in three dimensional space. The first # principal component is the longest axis of the ellipsoid, the second # principal component the second longest axis of the ellipsoid, and the # third principal component is the shortest axis. Each row in the data # matrix can be reconstructed as a suitable linear combination of the # principal components. 
However, in order to reduce the dimensionality of # the data, usually only the most important principal components are # retained. The remaining variance present in the data is then regarded as # unexplained variance. # # The principal components can be found by calculating the eigenvectors of # the covariance matrix of the data. The corresponding eigenvalues # determine how much of the variance present in the data is explained by # each principal component. # # Before applying principal component analysis, typically the mean is # subtracted from each column in the data matrix. In the example above, # this effectively centers the ellipsoidal cloud around its centroid in 3D # space, with the principal components describing the variation of points # in the ellipsoidal cloud with respect to their centroid. # # The function `pca` below first uses the singular value decomposition to # calculate the eigenvalues and eigenvectors of the data matrix. The # singular value decomposition is implemented as a translation in C of the # Algol procedure `svd` @golub1971, which uses Householder # bidiagonalization and a variant of the QR algorithm. The principal # components, the coordinates of each data vector along the principal # components, and the eigenvalues corresponding to the principal # components are then evaluated and returned in decreasing order of the # magnitude of the eigenvalue. If data centering is desired, the mean # should be subtracted from each column in the data matrix before calling # the `pca` routine. # # To apply Principal Component Analysis to a rectangular matrix `data`, # use # # from Bio.Cluster import pca columnmean, coordinates, components, eigenvalues = pca(data) # # This function returns a tuple # `columnmean, coordinates, components, eigenvalues`: # # - `columnmean`\ # Array containing the mean over each column in `data`. # # - `coordinates`\ # The coordinates of each row in `data` with respect to the # principal components. 
# # - `components`\ # The principal components. # # - `eigenvalues`\ # The eigenvalues corresponding to each of the principal components. # # The original matrix `data` can be recreated by calculating # `columnmean + dot(coordinates, components)`. # # Handling Cluster/TreeView-type files # ------------------------------------ # # Cluster/TreeView are GUI-based codes for clustering gene expression # data. They were originally written by [Michael Eisen](http://rana.lbl.gov) while at Stanford University. `Bio.Cluster` # contains functions for reading and writing data files that correspond to # the format specified for Cluster/TreeView. In particular, by saving a # clustering result in that format, TreeView can be used to visualize the # clustering results. We recommend using Alok Saldanha’s # [Java TreeView program](http://jtreeview.sourceforge.net/), # which can display hierarchical as well as $k$-means clustering results. # # An object of the class `Record` contains all information stored in a # Cluster/TreeView-type data file. To store the information contained in # the data file in a `Record` object, we first open the file and then read # it: # # from Bio import Cluster handle = open("mydatafile.txt") record = Cluster.read(handle) handle.close() # # This two-step process gives you some flexibility in the source of the # data. For example, you can use # # import gzip # Python standard library handle = gzip.open("mydatafile.txt.gz") # # to open a gzipped file, or # # import urllib # Python standard library handle = urllib.urlopen("http://somewhere.org/mydatafile.txt") # # to open a file stored on the Internet before calling `read`. # # The `read` command reads the tab-delimited text file `mydatafile.txt` # containing gene expression data in the format specified for Michael # Eisen’s Cluster/TreeView program. For a description of this file format, # see the manual to Cluster/TreeView. 
It is available at [Michael Eisen’s # lab website](http://rana.lbl.gov/manuals/ClusterTreeView.pdf) and at # [our # website](http://bonsai.ims.u-tokyo.ac.jp/~mdehoon/software/cluster/cluster3.pdf). # # A `Record` object has the following attributes: # # - `data`\ # The data array containing the gene expression data. Genes are stored # row-wise, while microarrays are stored column-wise. # # - `mask`\ # This array shows which elements in the `data` array, if any, # are missing. If `mask[i,j]==0`, then `data[i,j]` is missing. If no # data were found to be missing, `mask` is set to `None`. # # - `geneid`\ # This is a list containing a unique description for each gene (i.e., # ORF numbers). # # - `genename`\ # This is a list containing a description for each gene (i.e., # gene name). If not present in the data file, `genename` is set to # `None`. # # - `gweight`\ # The weights that are to be used to calculate the distance in # expression profile between genes. If not present in the data file, # `gweight` is set to `None`. # # - `gorder`\ # The preferred order in which genes should be stored in an # output file. If not present in the data file, `gorder` is set to # `None`. # # - `expid`\ # This is a list containing a description of each microarray, e.g. # experimental condition. # # - `eweight`\ # The weights that are to be used to calculate the distance in # expression profile between microarrays. If not present in the data # file, `eweight` is set to `None`. # # - `eorder`\ # The preferred order in which microarrays should be stored in an # output file. If not present in the data file, `eorder` is set to # `None`. # # - `uniqid`\ # The string that was used instead of UNIQID in the data file. # # After loading a `Record` object, each of these attributes can be # accessed and modified directly. For example, the data can be # log-transformed by taking the logarithm of `record.data`. 
# # ### Calculating the distance matrix {#calculating-the-distance-matrix .unnumbered} # # To calculate the distance matrix between the items stored in the record, # use # # matrix = record.distancematrix() # # where the following arguments are defined: # # - `transpose` (default: `0`)\ # Determines if the distances between the rows of `data` are to be # calculated (`transpose==0`), or between the columns of `data` # (`transpose==1`). # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # This function returns the distance matrix as a list of rows, where the # number of columns of each row is equal to the row number (see section # \[subsec:distancematrix\]). # # ### Calculating the cluster centroids {#calculating-the-cluster-centroids .unnumbered} # # To calculate the centroids of clusters of items stored in the record, # use # # cdata, cmask = record.clustercentroids() # # - `clusterid` (default: `None`)\ # Vector of integers showing to which cluster each item belongs. If # `clusterid` is not given, then all items are assumed to belong to # the same cluster. # # - `method` (default: `'a'`)\ # Specifies whether the arithmetic mean (`method=='a'`) or the median # (`method=='m'`) is used to calculate the cluster center. # # - `transpose` (default: `0`)\ # Determines if the centroids of the rows of `data` are to be # calculated (`transpose==0`), or the centroids of the columns of # `data` (`transpose==1`). # # This function returns the tuple `cdata, cmask`; see section # \[subsec:clustercentroids\] for a description. 
# # ### Calculating the distance between clusters {#calculating-the-distance-between-clusters-1 .unnumbered} # # To calculate the distance between clusters of items stored in the # record, use # # distance = record.clusterdistance() # # where the following arguments are defined: # # - `index1` (default: `0`)\ # A list containing the indices of the items belonging to the # first cluster. A cluster containing only one item $i$ can be # represented either as a list `[i]`, or as an integer `i`. # # - `index2` (default: `0`)\ # A list containing the indices of the items belonging to the # second cluster. A cluster containing only one item $i$ can be # represented either as a list `[i]`, or as an integer `i`. # # - `method` (default: `'a'`)\ # Specifies how the distance between clusters is defined: # # - `'a'`: Distance between the two cluster centroids (arithmetic # mean); # # - `'m'`: Distance between the two cluster centroids (median); # # - `'s'`: Shortest pairwise distance between items in the two # clusters; # # - `'x'`: Longest pairwise distance between items in the two # clusters; # # - `'v'`: Average over the pairwise distances between items in the # two clusters. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # - `transpose` (default: `0`)\ # If `transpose==0`, calculate the distance between the rows of # `data`. If `transpose==1`, calculate the distance between the # columns of `data`. # # ### Performing hierarchical clustering {#performing-hierarchical-clustering-1 .unnumbered} # # To perform hierarchical clustering on the items stored in the record, # use # # tree = record.treecluster() # # where the following arguments are defined: # # - `transpose` (default: `0`)\ # Determines if rows (`transpose==0`) or columns (`transpose==1`) are # to be clustered. 
# # - `method` (default: `'m'`)\ # defines the linkage method to be used: # # - `method=='s'`: pairwise single-linkage clustering # # - `method=='m'`: pairwise maximum- (or complete-) linkage # clustering # # - `method=='c'`: pairwise centroid-linkage clustering # # - `method=='a'`: pairwise average-linkage clustering # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # - `transpose`\ # Determines if genes or microarrays are being clustered. If # `transpose==0`, genes (rows) are being clustered. If `transpose==1`, # microarrays (columns) are clustered. # # This function returns a `Tree` object. This object contains # $\left(\textrm{number of items} - 1\right)$ nodes, where the number of # items is the number of rows if rows were clustered, or the number of # columns if columns were clustered. Each node describes a pairwise # linking event, where the node attributes `left` and `right` each contain # the number of one item or subnode, and `distance` the distance between # them. Items are numbered from 0 to # $\left(\textrm{number of items} - 1\right)$, while clusters are numbered # -1 to $-\left(\textrm{number of items}-1\right)$. # # ### Performing $k$-means or $k$-medians clustering {#performing-k-means-or-k-medians-clustering .unnumbered} # # To perform $k$-means or $k$-medians clustering on the items stored in # the record, use # # clusterid, error, nfound = record.kcluster() # # where the following arguments are defined: # # - `nclusters` (default: `2`)\ # The number of clusters $k$. # # - `transpose` (default: `0`)\ # Determines if rows (`transpose` is `0`) or columns (`transpose` is # `1`) are to be clustered. # # - `npass` (default: `1`)\ # The number of times the $k$-means/-medians clustering algorithm is # performed, each time with a different (random) initial condition. 
If # `initialid` is given, the value of `npass` is ignored and the # clustering algorithm is run only once, as it behaves # deterministically in that case. # # - `method` (default: `a`)\ # describes how the center of a cluster is found: # # - `method=='a'`: arithmetic mean ($k$-means clustering); # # - `method=='m'`: median ($k$-medians clustering). # # For other values of `method`, the arithmetic mean is used. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). # # This function returns a tuple `(clusterid, error, nfound)`, where # `clusterid` is an integer array containing the number of the cluster to # which each row or cluster was assigned, `error` is the within-cluster # sum of distances for the optimal clustering solution, and `nfound` is # the number of times this optimal solution was found. # # ### Calculating a Self-Organizing Map {#calculating-a-self-organizing-map .unnumbered} # # To calculate a Self-Organizing Map of the items stored in the record, # use # # clusterid, celldata = record.somcluster() # # where the following arguments are defined: # # - `transpose` (default: `0`)\ # Determines if rows (`transpose` is `0`) or columns (`transpose` is # `1`) are to be clustered. # # - `nxgrid, nygrid` (default: `2, 1`)\ # The number of cells horizontally and vertically in the rectangular # grid on which the Self-Organizing Map is calculated. # # - `inittau` (default: `0.02`)\ # The initial value for the parameter $\tau$ that is used in the # SOM algorithm. The default value for `inittau` is 0.02, which was # used in Michael Eisen’s Cluster/TreeView program. # # - `niter` (default: `1`)\ # The number of iterations to be performed. # # - `dist` (default: `'e'`, Euclidean distance)\ # Defines the distance function to be used # (see \[sec:distancefunctions\]). 
# # This function returns the tuple `(clusterid, celldata)`: # # - `clusterid`:\ # An array with two columns, where the number of rows is equal to the # number of items that were clustered. Each row contains the $x$ and # $y$ coordinates of the cell in the rectangular SOM grid to which the # item was assigned. # # - `celldata`:\ # An array with dimensions # $\left(\verb|nxgrid|, \verb|nygrid|, \textrm{number of columns}\right)$ # if rows are being clustered, or # $\left(\verb|nxgrid|, \verb|nygrid|, \textrm{number of rows}\right)$ # if columns are being clustered. Each element `[ix][iy]` of this # array is a 1D vector containing the gene expression data for the # centroid of the cluster in the grid cell with coordinates # `[ix][iy]`. # # ### Saving the clustering result {#saving-the-clustering-result .unnumbered} # # To save the clustering result, use # # record.save(jobname, geneclusters, expclusters) # # where the following arguments are defined: # # - `jobname`\ # The string `jobname` is used as the base name for names of the files # that are to be saved. # # - `geneclusters`\ # This argument describes the gene (row-wise) clustering result. In # case of $k$-means clustering, this is a 1D array containing the # number of the cluster each gene belongs to. It can be calculated # using `kcluster`. In case of hierarchical clustering, `geneclusters` # is a `Tree` object. # # - `expclusters`\ # This argument describes the (column-wise) clustering result for the # experimental conditions. In case of $k$-means clustering, this is a # 1D array containing the number of the cluster each experimental # condition belongs to. It can be calculated using `kcluster`. In case # of hierarchical clustering, `expclusters` is a `Tree` object. # # This method writes the text file `jobname.cdt`, `jobname.gtr`, # `jobname.atr`, `jobname*.kgg`, and/or `jobname*.kag` for subsequent # reading by the Java TreeView program. 
If `geneclusters` and # `expclusters` are both `None`, this method only writes the text file # `jobname.cdt`; this file can subsequently be read into a new `Record` # object. # # Example calculation # ------------------- # # This is an example of a hierarchical clustering calculation, using # single linkage clustering for genes and maximum linkage clustering for # experimental conditions. As the Euclidean distance is being used for # gene clustering, it is necessary to scale the node distances `genetree` # such that they are all between zero and one. This is needed for the Java # TreeView code to display the tree diagram correctly. To cluster the # experimental conditions, the uncentered correlation is being used. No # scaling is needed in this case, as the distances in `exptree` are # already between zero and two. The example data `cyano.txt` can be found # in the `data` subdirectory. # # from Bio import Cluster handle = open("cyano.txt") record = Cluster.read(handle) handle.close() genetree = record.treecluster(method='s') genetree.scale() exptree = record.treecluster(dist='u', transpose=1) record.save("cyano_result", genetree, exptree) # # This will create the files `cyano_result.cdt`, `cyano_result.gtr`, and # `cyano_result.atr`. # # Similarly, we can save a $k$-means clustering solution: # # from Bio import Cluster handle = open("cyano.txt") record = Cluster.read(handle) handle.close() (geneclusters, error, ifound) = record.kcluster(nclusters=5, npass=1000) (expclusters, error, ifound) = record.kcluster(nclusters=2, npass=100, transpose=1) record.save("cyano_result", geneclusters, expclusters)
notebooks/15 - Cluster Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: HousingEnv
#     language: python
#     name: housingenv
# ---

# ---

# ## Evaluating Model Performance
# We'll investigate several different algorithms and determine which is best at modeling the data.

# ### Implementation - Creating a Training and Predicting Pipeline
#
# To properly evaluate the performance of various models, we need to create a training and predicting pipeline that allows for quick and effective model training using various sizes of training data and perform predictions on the testing data.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
# %matplotlib inline

from time import time
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingRegressor

# OPTIONAL: Load the "autoreload" extension so that code can change
# %load_ext autoreload

# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
# %autoreload 2

sns.set()

# +
# Features, log-transformed target, and the unlabeled test set all share the 'Id' index.
df = pd.read_csv('../data/processed/features_final.csv', index_col='Id')
target = pd.read_csv('../data/interim/target_log_xformed.csv', index_col='Id', squeeze=True)
test_df = pd.read_csv('../data/processed/test_final.csv', index_col='Id')
df.head()
# -

target.head()

# +
# Split the 'features' and target data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(df, target, test_size = 0.2, random_state = 5)

# Show the results of the split
print(f'Training set has {X_train.shape[0]} samples.')
print(f'Testing set has {X_val.shape[0]} samples.')


# +
def evaluate(results):
    """
    Visualization code to display timing and error results of the trained learners.

    inputs:
      - results: dict keyed by learner class name; each value maps a
        training-set-size index (0 -> 1%, 1 -> 10%, 2 -> 100%) to the
        statistics dict returned by 'train_predict()' (keys 'train_time',
        'pred_time', 'rmse_train', 'rmse_test', 'mae_train', 'mae_test')
    """
    # Create figure: 2 rows x 3 columns of panels (training row, testing row)
    fig, ax = plt.subplots(2, 3, figsize = (16,12))

    # Constants
    bar_width = 0.08
    colors = sns.color_palette()

    # Super loop to plot the six panels of data
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(['train_time', 'rmse_train', 'mae_train', 'pred_time', 'rmse_test', 'mae_test']):
            for i in range(3):
                # One bar per learner (offset by k) for each training-set size i
                ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k])
                ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
                ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
                ax[j//3, j%3].set_xlabel("Training Set Size")
                ax[j//3, j%3].set_xlim((-0.1, 3.0))

    # Add unique y-labels
    ax[0, 0].set_ylabel("Time (in seconds)")
    ax[0, 1].set_ylabel("Root Mean Squared Error")
    ax[0, 2].set_ylabel("Mean Absolute Error")
    ax[1, 0].set_ylabel("Time (in seconds)")
    ax[1, 1].set_ylabel("Root Mean Squared Error")
    ax[1, 2].set_ylabel("Mean Absolute Error")

    # Add titles (labels unified: the bottom row reports on the testing set)
    ax[0, 0].set_title("Model Training")
    ax[0, 1].set_title("RMSE - Training Subset")
    ax[0, 2].set_title("MAE - Training Subset")
    ax[1, 0].set_title("Model Predicting")
    ax[1, 1].set_title("RMSE - Testing Set")
    ax[1, 2].set_title("MAE - Testing Set")

    # Create patches for the legend
    patches = []
    for i, learner in enumerate(results.keys()):
        patches.append(mpatches.Patch(color = colors[i], label = learner))
    plt.legend(handles = patches, loc='upper center', bbox_to_anchor=(-.8, 2.53), ncol = 4, fontsize = 'x-large')

    # Aesthetics
    # NOTE(review): the suptitle says 'Three' but six learners are plotted below — confirm intended wording.
    plt.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.0)
    plt.show()


# +
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error


def train_predict(learner, sample_size, X_train, y_train, X_val, y_val):
    '''
    Fit 'learner' on the first 'sample_size' training rows and collect timing
    and error statistics on both the training and validation sets.

    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: log-transformed sale-price training targets
       - X_val: features validation set
       - y_val: log-transformed sale-price validation targets

    returns:
       dict with keys 'train_time', 'pred_time', 'rmse_train', 'rmse_test',
       'mae_train', 'mae_test'. Errors are computed after back-transforming
       the log targets with np.exp, i.e. on actual sale prices.
    '''
    results = {}

    # Fit the learner to the first sample_size rows of the training data
    start = time()  # Get start time
    learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time()  # Get end time

    # Calculate the training time
    results['train_time'] = end - start

    # Get the predictions on the validation set (X_val), then on the full training set
    start = time()  # Get start time
    predictions_val = learner.predict(X_val)
    predictions_train = learner.predict(X_train)
    end = time()  # Get end time

    # Calculate the total prediction time
    results['pred_time'] = end - start

    # Compute RMSE on the training set (squared=False returns the root)
    results['rmse_train'] = mean_squared_error(np.exp(y_train), np.exp(predictions_train), squared=False)

    # Compute RMSE on the validation set
    results['rmse_test'] = mean_squared_error(np.exp(y_val), np.exp(predictions_val), squared=False)

    # Compute MAE on the training set
    results['mae_train'] = mean_absolute_error(np.exp(y_train), np.exp(predictions_train))

    # Compute MAE on the validation set
    results['mae_test'] = mean_absolute_error(np.exp(y_val), np.exp(predictions_val))

    # Success
    print(f'{learner.__class__.__name__} trained on {sample_size} samples.')

    # Return the results
    return results


# -

# ### Implementation: Initial Model Evaluation

# +
# Import supervised learning models
from sklearn.linear_model import TweedieRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor
# from sklearn.tree import DecisionTreeRegressor
# from xgboost import XGBRegressor

# Initialize the models
# Instantiate one candidate regressor per family; a fixed random_state is set
# wherever the estimator is stochastic, so runs are reproducible.
tr_model = TweedieRegressor(power=2, link='log')
rf_model = RandomForestRegressor(random_state=5)
adaBoost_model = AdaBoostRegressor(random_state=5)
gb_model = GradientBoostingRegressor(random_state=5)
ridge_model = Ridge(random_state=5)
knn_model = KNeighborsRegressor()

algorithms = [tr_model, rf_model, adaBoost_model, gb_model, ridge_model, knn_model]

# +
# Algorithm Comparison
# Calculate the number of samples for 1%, 10%, and 100% of the training data
# samples_100 is the entire training set i.e. len(y_train)
# samples_10 is 10% of samples_100
# samples_1 is 1% of samples_100
samples_100 = len(y_train)
samples_10 = np.int_((samples_100 * 0.1))
samples_1 = np.int_((samples_100 * 0.01))

# Collect results on the learners: results[learner_name][size_index] is the
# statistics dict returned by train_predict() for that training-set size.
results = {}
# for rgsr in [rgsr_A, rgsr_B, rgsr_C, rgsr_D]:
for rgsr in algorithms:
    rgsr_name = rgsr.__class__.__name__
    results[rgsr_name] = {}
    for i, samples in enumerate([samples_1, samples_10, samples_100]):
        results[rgsr_name][i] = \
        train_predict(rgsr, samples, X_train, y_train, X_val, y_val)

# Run metrics visualization for the six supervised learning models chosen
evaluate(results)

# Uncomment this line to save the figure.
# plt.savefig('../reports/figures/Model_Comparison_Performance_Metrics.svg')
# -

results

# ### Choosing the best model
#
# The GradientBoostingRegressor algorithm appears to return the lowest RMSE for our testing set. We'll try tuning its parameters/hyperparameters to see if we can further improve our predictions

# +
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer

# Initialize the regressor
rgsr = GradientBoostingRegressor(random_state=5)

# Create the parameters list to tune
parameters = {'n_estimators': [10, 100, 1000], 'learning_rate': [0.001, 0.01, 0.1]}

# Make an RMSE scoring object using make_scorer()
# (greater_is_better=False because GridSearchCV maximizes the score and a
# lower RMSE is better)
scorer = make_scorer(mean_squared_error, squared=False, greater_is_better=False)

# Perform grid search on the regressor using 'scorer' as the scoring method using GridSearchCV()
grid_obj = GridSearchCV(estimator=rgsr, param_grid=parameters, scoring=scorer)

# Fit the grid search object to the training data and find the optimal parameters using fit()
grid_fit = grid_obj.fit(X_train, y_train)

# Get the best estimator found by the search
best_rgsr = grid_fit.best_estimator_

# Make predictions using the unoptimized and the optimized models
predictions = (rgsr.fit(X_train, y_train)).predict(X_val)
best_predictions = best_rgsr.predict(X_val)

# Report the before-and-after scores (RMSE on back-transformed sale prices)
print("Unoptimized model\n------")
print("RMSE on testing data: {:.4f}".format(mean_squared_error(np.exp(y_val), np.exp(predictions), squared=False)))
print("\nOptimized Model\n------")
print("Final RMSE on the testing data: {:.4f}".format(mean_squared_error(np.exp(y_val), np.exp(best_predictions), squared=False)))
# -

best_rgsr.get_params()

# Predict sale prices for the unlabeled test set with the tuned model.
final_preds = best_rgsr.predict(test_df)

# +
# Back-transform predictions from log space before exporting.
preds = pd.Series(np.exp(final_preds), index=test_df.index, name='SalePrice')

# Uncomment this line to export Results
# preds.to_csv('../data/processed/predictions.csv')
notebooks/0.3-saw-model-performance-evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Project helper modules (data loading, plotting, radar axes) live in aux/modules.
import sys
sys.path.append('aux/modules')

import cf_data, cf_plot, cf_radar

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
#warnings.filterwarnings('ignore')

from IPython.display import Image

# %matplotlib inline
plt.style.use('aux/mpl-styles/dark.mplstyle')
# -

# Restrict the data to the major manufacturers (lowerBound presumably is a
# minimum sample count per manufacturer — confirm in cf_data).
pdMajorManufacturers = cf_data.getLimitedData(lowerBound = 6)

# Normalized copy used for the comparison plots; price/weight/riskiness/losses
# are normalized inverted, so larger values are consistently "better".
pdNormalized = pdMajorManufacturers.copy()
pdNormalized.rename(
    columns = {
        'city mpg': 'mpg_city',
        'highway mpg': 'mpg_highway',
        'horsepower': 'horse_power'
    },
    inplace = True
)
cf_data.normalizeColumns(['mpg_city', 'mpg_highway', 'horse_power'], pdNormalized)
cf_data.normalizeColumnsInverted(['price', 'weight', 'riskiness', 'losses'], pdNormalized)

# ### Creating Layers (Without Data)

# +
# Dry run of the dashboard layout: build the nested grid of empty axes only.
figure = plt.figure(figsize = (10, 8))
gridSpecMaster = mpl.gridspec.GridSpec(4, 2, height_ratios = [1, 2, 8, 2])

# Layer 01 - Title
gridSpec01 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[0, :])
axesTitle = figure.add_subplot(gridSpec01[0])

# Layer 2 - Price
gridSpec02 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[1, :])
axesPrices = figure.add_subplot(gridSpec02[0])

# Layer 3.1 - Risks (one wide axes on top, two smaller axes below it)
gridSpec0301 = mpl.gridspec.GridSpecFromSubplotSpec(2, 2, height_ratios = [2, 1], subplot_spec = gridSpecMaster[2, :1])
axesRiskAndLoss = figure.add_subplot(gridSpec0301[0, :])
axesRisk = figure.add_subplot(gridSpec0301[1, :1])
axesLoss = figure.add_subplot(gridSpec0301[1:, 1])

# Layer 3.2 - Radar
gridSpec0302 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[2, 1])
axesRadar = figure.add_subplot(gridSpec0302[0])

# Layer 4 - MPG
gridSpec04 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[3, :])
axesMpg = figure.add_subplot(gridSpec04[0])

# Layer Join
gridSpecMaster.tight_layout(figure)
plt.show()
# -

# ### Dashboard Creation

# +
# Full dashboard: the same layered layout, now populated with the plots.
figure = plt.figure(figsize = (15, 25))
gridSpecMaster = mpl.gridspec.GridSpec(5, 2, height_ratios = [100, 100, 100, 100, 150], hspace = 0, wspace = 0)

# Layer 01 - priceByManufacturer
gridSpec01 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[0, :])
axes = figure.add_subplot(gridSpec01[0])
cf_plot.priceByManufacturer(figure, pdData = pdMajorManufacturers, axes = axes)

# Layer 02 - inverseRiskinessByManufacturer and inverseLossesByManufacturer (side by side)
gridSpec02 = mpl.gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec = gridSpecMaster[1, :])
axes = figure.add_subplot(gridSpec02[0, 0])
cf_plot.inverseRiskinessByManufacturer(figure, pdData = pdNormalized, axes = axes, legend = False, rotateTicks = True)
axes = figure.add_subplot(gridSpec02[0, 1])
cf_plot.inverseLossesByManufacturer(figure, pdData = pdNormalized, axes = axes,legend = False, rotateTicks = True)

# Layer 03 - inverseRiskinessAndLossesCombinedByManufacturer
gridSpec03 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[2, :])
axes = figure.add_subplot(gridSpec03[0])
cf_plot.inverseRiskinessAndLossesCombinedByManufacturer(figure, pdData = pdNormalized, axes = axes)

# Layer 04 - mpgByManufacturer
gridSpec04 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gridSpecMaster[3, :])
axes = figure.add_subplot(gridSpec04[0])
cf_plot.mpgByManufacturer(figure, pdData = pdMajorManufacturers, axes = axes)

# Layer 05 - radarByManufacturer: a title row plus two rows of radar charts.
gridSpec05 = mpl.gridspec.GridSpecFromSubplotSpec(
    3, 6,
    height_ratios = [1, 10, 10],
    wspace = 0.25,
    hspace = 1.25,
    subplot_spec = gridSpecMaster[4, :])
(rows, cols) = geometry = gridSpec05.get_geometry()
title_axes = figure.add_subplot(gridSpec05[0, :])
inner_axes = []
# One spoke per normalized column; the same radar projection is shared by all charts.
projection = cf_radar.RadarAxes(spokeCount = len(pdNormalized.groupby("manufacturer").mean().columns))
# Add one radar subplot per grid cell, skipping the first row (cols cells),
# which is occupied by the title axes.
# Fixed: this was a list comprehension used purely for its append side effect;
# a plain loop states the intent and does not build a throwaway list.
for cell_spec in list(gridSpec05)[cols:]:
    inner_axes.append(figure.add_subplot(cell_spec, projection = projection))

# Draw the per-manufacturer radar charts into the prepared axes.
cf_plot.radarByManufacturer(
    figure,
    pdData = pdNormalized,
    titleAxes = title_axes,
    innerAxes = inner_axes,
    legendAxes = False,
    geometry = geometry)

# Layer Join
gridSpecMaster.tight_layout(figure)
plt.show()
modules/02-data-organization-and-visualization/16-automobile-dashboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example for a planning problem and a plan # # Here is a brief example for how to use the classes `PlanningProblem` and `Action` to represent a planning problem, and to verify that a given plan achieves the goals for it. # # Let's start by importing some things from [planning.py](planning.py). from planning import PlanningProblem, Action, Expr, expr import planning # Then let's specify an example planning problem. # # We give the initial state and the goals as strings (which will be automatically converted to a list of `Expr` objects). Similarly, for the single action in this planning problem, we specify the precondition and the effects as a string (which will be automatically converted to a list of `Expr` objects). # # After specifying the planning problem, we inspect its initial state. # + planning_problem = PlanningProblem( initial = 'Object(Book) & Object(Glasses) & Location(Room1) & Location(Room2) & At(Book,Room1) & At(Glasses,Room1)', goals = 'At(Book,Room2) & At(Glasses,Room2)', actions = [Action( 'Move(x, a, b)', precond='Object(x) & Location(a) & Location(b) & At(x, a) & ~At(x, b)', effect='At(x, b) & ~At(x, a)' )] ); planning_problem.initial # - # Now let's apply an action instance (given by `expr('Move(Book,Room1,Room2)')`) to this planning problem. # # This results in a new planning problem, where the initial state has changed (reflecting the application of the action). planning_problem.act(expr('Move(Book,Room1,Room2)')); planning_problem.initial # **Note:** after applying the method `act()`, the `PlanningProblem` object changed (in particular, it now has a different initial state). 
# # Let's now '`Move`' the '`Book`' back from '`Room2`' to '`Room1`', and see what the initial state of the resulting planning problem looks like after this. planning_problem.act(expr('Move(Book,Room2,Room1)')); planning_problem.initial # If we try to apply an action where the preconditions are not satisfied, we get an error: try: planning_problem.act(expr('Move(Book,Room2,Room1)')); except Exception as e: print("Exception: {}".format(e)); # For this simple planning problem, it is easy to see how to reach the goals. Let's make a plan, consisting of a list of `Expr` objects specifying which action instances to apply, and verify that this is a correct plan. # + plan = [expr('Move(Book,Room1,Room2)'), expr('Move(Glasses,Room1,Room2)')]; for action in plan: planning_problem.act(action); planning_problem.goal_test()
hw3/example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction
# In previous notebooks, we've used the E. coli core map to visualize fluxes in metabolism. However there are of course species-differences between *P. thermoglucosidasius* NCBI 11955 and E. coli, so for further analysis of data it would be very beneficial to have a map of central carbon metabolism as it is in our strain. This notebook will cover modifying the E. coli core map to fit our wishes as much as possible.
#
# Later we can decide to make one that also includes amino acid metabolism, or any other functionalities we would like.

import cameo
import pandas as pd
import cobra.io
import escher
from escher import Builder
from cobra import Reaction, Metabolite

# Draft model for our strain plus the E. coli reference model (used below to
# copy curated metabolite annotations).
model = cobra.io.read_sbml_model('../model/p-thermo.xml')
model_e_coli = cameo.load_model('iML1515')

#here I can build the map on the e. coli core backbone
builder = Builder(model=model, map_name = 'e_coli_core.Core metabolism')

#here I can build the map on the e. coli core backbone, taking the initial changes
Builder(model=model, map_json = '../map/p-thermo-map.json')

# __Glutamate__
#
# I have now observed that we have three glutamate metabolites: L, D and DL glutamate. According to bioinformatic data, our strain can form both L and D glutamate, though L is the predominant form required for biomass for example.
#
# Here I will remove the DL metabolite and leave the rest.

#remove ACYSAT, same as CYSAT
model.remove_reactions(model.reactions.ACYSAT)
model.remove_metabolites(model.metabolites.glu__DL_c)

# __Additional changes__ Modifying the map also lets us see some changes that need to be made.
#
# For example, this strain has been shown to produce formate upon fermentation, and so we need a formate transport reaction that is currently lacking. (data from Tang et al. 2009, doi 10.1002/bit.22181) Bioinformatically, a formate-proton symporter has been identified and so this will be added.

#add formate transport (formate/proton symporter, reversible)
model.add_reaction(Reaction(id='FORt'))
model.reactions.FORt.name = 'Transport of formate via proton symport'
# Fixed: this previously assigned to the misspelled attribute 'brounds',
# which silently created a stray attribute and left the reaction at its
# default bounds instead of making it reversible.
model.reactions.FORt.bounds = (-1000,1000)

# Extracellular formate metabolite, mirroring the cytosolic one.
# (The original cell assigned name/formula repeatedly; the duplicates are removed.)
model.add_metabolites(Metabolite(id='for_e'))
model.metabolites.for_e.name = 'Formate'
model.metabolites.for_e.formula = model.metabolites.for_c.formula
model.metabolites.for_e.charge = model.metabolites.for_c.charge
model.metabolites.for_e.compartment = 'e'
model.metabolites.h_e.name = 'Proton'

# Symport stoichiometry: extracellular formate and a proton come in together.
model.reactions.FORt.add_metabolites({model.metabolites.for_e: -1, model.metabolites.for_c: 1, model.metabolites.h_e:-1, model.metabolites.h_c:1})

# NOTE(review): unlike the phosphate transport added below, no EX_for_e
# exchange reaction is created here — confirm formate can actually cross the
# system boundary.

# add phosphate transport: we supply phosphate in the medium, and it can be taken up by the cell. considering its role in ATP generation it will be an important metabolite to supply in the default medium.
#

# Extracellular phosphate metabolite, mirroring the cytosolic one.
model.add_metabolites(Metabolite(id='pi_e'))
model.metabolites.pi_e.name = 'Phosphate'
model.metabolites.pi_e.formula = model.metabolites.pi_c.formula
model.metabolites.pi_e.charge = model.metabolites.pi_c.charge
model.metabolites.pi_e.compartment = 'e'
# Reuse the curated annotation from the E. coli reference model.
model.metabolites.pi_e.annotation = model_e_coli.metabolites.pi_e.annotation

# Reversible phosphate transport plus an exchange reaction at the boundary.
model.add_reaction(Reaction(id='PIt'))
model.reactions.PIt.name = 'Transport of phosphate'
model.reactions.PIt.bounds = (-1000,1000)
model.reactions.PIt.add_metabolites({model.metabolites.pi_e:-1, model.metabolites.pi_c:1})
model.add_boundary(model.metabolites.pi_e, type = 'exchange', reaction_id = 'EX_pi_e')

# Rename reactions to the identifiers the map expects.
model.reactions.ICDHhr.id = 'ICDHyr'
#modify name
model.reactions.R5PISO.id = 'RPI'

#save & commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# In looking into the map, I've looked into ATP synthesis, searching for the ATP synthase. In doing so, I ran into ATPPR, which just converts ATP to ADP reversibly. This needs to be removed, and interestingly doesn't even change our biomass prediction. Which is kind of strange, but atleast then I can remove it now.

# NOTE(review): the text above says ATPPR but the code removes ATPPT —
# confirm which reaction id is actually the spurious ATP->ADP conversion.
model.remove_reactions(model.reactions.ATPPT)

#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# ## Modify map further
# Now that we have fixed quite some problems in the model, I will modify the Escher map we have again so that it includes more correct nodes.

#other comment, noticed i missed adding H2O and H+ to the biomass reaction when I added atp
model.reactions.biomass.add_metabolites({model.metabolites.h2o_c:-104.9974, model.metabolites.h_c:104.9974})

#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

#should be reversible to allow TCA cycle to function in both ways
model.reactions.SUCDi.bounds = (-1000,1000)

#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

### here I can build the map on the e. coli core backbone, taking the initial changes
Builder(model=model, map_json = '../map/p-thermo-map.json')

# Run FBA and overlay the resulting fluxes on the map.
solution = model.optimize()

Builder(model = model, map_json = '../map/p-thermo-map.json', reaction_data = solution.fluxes.to_dict())

# + active=""
# model.reactions.ATPM
# -
model.reactions.ATPM

# + active=""
# model.reactions.ATPM
# -
model.reactions.ATPM

# It is strange that so much of the glucose doesn't go through glycolysis. Looking into it, we see that the glucose is converted into fructose, which is phosphorylated 'for free' in the FRU1PT reaction. The F1P can be converted into dhAp and G3p, and flow back into metabolism. So the FRU1PT reaction should be fixed. This is one of the reactions that Martyn was going to look into and fix. So I will leave it for now, knowing that when the mass balance is complete this problem may be fixed.

# NOTE(review): this path variable is assigned but not used below — confirm
# whether it was meant to be passed to Builder/save_html.
g_thermo_map = '../databases/Maps/p-thermo-map.json'

#save map
builder.save_html('example_map.html')
notebooks/16. Build Escher Map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('base')
#     language: python
#     name: python3
# ---

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold, RandomizedSearchCV
from scipy.stats import loguniform
from sklearn.dummy import DummyClassifier

# Load the sonar dataset (no header row; the last column is the class label)
sonar = pd.read_csv('sonar.csv', header=None)

# Separate the feature matrix from the label column
values = sonar.values
X, y = values[:, :-1], values[:, -1]
X.shape, y.shape

# Baseline accuracy from a classifier that ignores the features entirely
dummy = DummyClassifier()
dummy.fit(X, y)
dummy.score(X,y)

# Candidate model and its cross-validation scheme
model = LogisticRegression()
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)

# Hyperparameter search space, expressed as a single dict literal
space = {
    'solver': ['newton-cg', 'lbfgs', 'liblinear'],
    'penalty': ['none', 'l1', 'l2', 'elasticnet'],
    'C': loguniform(1e-5, 100),
}

# Randomized search over the space, then report the best configuration found
search = RandomizedSearchCV(model, space, cv=cv, scoring='accuracy', n_iter=500, n_jobs=-1, random_state=42)
result = search.fit(X,y)
print('Best Score: %s' % result.best_score_)
print('Best Hyperparameters: %s' % result.best_params_)
Hyperparameter-Tuning/classification_randomsearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: IPython (Python 3) # language: python # name: python3 # --- # # Vous le savez ... # # ## mais vous ne savez pas que vous le savez ! # # ![toimeme](tusais.jpg) # # ## Intro : Combien de temps pour apprendre Python ? hypothèse_apprendre = 10j # ### Combien de temps pour oublier qu'on sait ? hypothèse_oublier = hypothèse_apprendre * 30 hypothèse_oublier # ### Bon, c'est des hypothèses # # Je ne peux pas généraliser hypothèse_oublier.real # ## Ce que les développeurs Python savent # # (en général, enfin, moi, quoi) # ### Déclarer un encodage de fichier source # + # coding: utf-8 from sys import stdin # + # -*-coding:utf-8-*- # + # coding: cp125253577 windows = complex() # + # -*-coding: utf-8-*- # + # coding: il devrait y avoir une -et de préférence une seule- façon évidente de procéder # - # ### des str, des unicode (!), des bytes u"Bonne journée !" "Bonne journée" r"Bonne journée" b"Bonne journée" # ### import : oui oui, ça veut dire eval # Soit un fichier importable hello.py # ```python # print("hello world") # ``` # ### Compilation # # Le point d'entrée n'est pas compilé. # # ### Formatter des chaînes de caractères # # #### Tout le monde comprend for mean in "🚀🚜🚁🚲🚶🚂🚕🚚": print("Où as tu garé ton %s pour PyConFr ?" % mean) # #### Tout le monde ne comprend pas immédiatement, du coup "T'es en %s ou en %s ? Ça fait %i fois que je demande" % "🚀🚜" + 3 "T'es en %s ou en %s ? Ça fait %i fois que je demande" % ("🚀🚜" + (3,)) "T'es en %s ou en %s ? 
Ça fait %i fois que je demande" % ("🚀", "🚜", 3) # #### format() "positionnel strict avec des accolades {} {} {}".format(1, 2, 3) "positionnel qui fonctionne comme un mapping : {1}{4}{4}{7}".format(*"abcdefghij") # + class Contexte(): def __init__(self, quand=None): self.quand = quand or "après" "Ou en mapping {contexte.quand} tout, en accédant aux attributs !".format(contexte=Contexte()) # - # Mais alors pourquoi il y a tout ça, et pourquoi avec logging je ne peux pas utiliser format ? # ### Affectation multiple a, b, c = '123' print(a) print(b) print(c) # ### Coercition de type int(hypothèse_oublier) int(False) complex(False) complex(True) False == None False == bool(None) == bool( () ) == bool( [] ) == bool( set() ) == bool( "" ) assert any((False, None, (), [], set(), "", 0)), "🚫 circulez" # ### Boucles # # #### while # # RAS # # #### for biblio = ("20000 lieues") for repet in range(3): print(repet) else: print("what else?") # #### break # # mais pas de break avec label # ### Générateurs # # Destabilisant, si. # + def compute_sum(a, b): try: yield a + b except Exception as err: pass # on y revient compute_sum(1, 3) # - # ### Décorateurs # # «Alors voilà, la méthode qui est là c'est pas du tout celle qui sera là» # + def decorateur(func): return lambda x: 4 @decorateur def décorée(val): return "{0}{1}{0}".format("★☆", val) # - décorée("salut") # En pratique, on se contente en général de "wrapper" avec une méthode elle-même décorée par functools.wraps. # ### coroutines # + def parrot(): nextvalue = '' for repetition in range(4): nextvalue = yield nextvalue def play_with_parrot(): phrases = ( "coco est content", "allô ?", "haut les mains !", "bachibouzouk !" 
) coco = parrot() next(coco) for phrase in phrases: print(coco.send(phrase)) play_with_parrot() # - # ![dead parrot](DeadParrot.png) next(n) # ### Séquences et constructeurs bizarres list("azertyuiop") dict( ( ("a", "SMALL LATIN LETTER A"), ("🚍", "ONCOMING BUS") ) ) # ### Valeurs par défaut, arguments variadiques def func(positionnel, valeur="défaut", *args, **kwargs): print(valeur) func(1, "celui-là, il va où ?") v = *("a", "b", "c") # ### Un opérateur matriciel ! # Personne ne s'en sert encore, et il faut Python 3.5, mais on en est fiers. # ```python # @ # ``` # + import numpy as np np.mat([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) @ np.mat([[1, 2], [2, 1], [3, 4]]) # - # ### Mutable, immutable a = list("") c = 5 b = c b += 1 c # ### Arguments mutables $\rightarrow$ variables globales involontaires # + def le_classique(valeur_avec_defaut=['🌈']): valeur_avec_defaut.append('🌈') return valeur_avec_defaut double_rainbow = le_classique() print(double_rainbow, id(double_rainbow)) double_rainbow = le_classique() print(double_rainbow, id(double_rainbow)) # oups # - # ### Traces imbriquées 👌 # + class DeuxièmeRideau(Exception): pass def explode(): print("{abc}".format(fgh="boum")) try: explode() except Exception as err: raise DeuxièmeRideau("oups") from err # - # #### Euh, pas du tout # # En vrai ça fait ça, et quand même c'est mieux : # # ```python # Traceback (most recent call last): # File "reraise.py", line 8, in <module> # explode() # File "reraise.py", line 5, in explode # print("{abc}".format(fgh="boum")) # KeyError: 'abc' # # The above exception was the direct cause of the following exception: # # Traceback (most recent call last): # File "reraise.py", line 10, in <module> # raise DeuxièmeRideau("oups") from err # __main__.DeuxièmeRideau: oups # ``` # ### context manager 👌 # # Ça plaît beaucoup mais... # # * comment détecte-t-on qu'on peut utiliser with ? # * quand il faut lire le code de gestion : un générateur décoré avec un try:except ! 
# ### Constructions en intention # # list comprehensions, inline generators [i for i in range(3)] [i for i in range(9) if i % 3] # ### Objet, classes # # #### Python 2 : des nouvelles et des anciennes classes # # (ouf, la syntaxe de super() est enfin supportable en Python 3). # # #### un initialiseur, pas un constructeur, la plupart du temps # # ```python # class CM2(): # def __init__(self...) # ``` # # #### Les classes sont des objets # # 1. on ne parle pas de metaclass # 2. on ne PARLE PAS de metaclass # 3. C'est quand même très utile (sqlalchemy ou autres serializers) # # On ne parle pas non plus du "binding" de méthode lors de la construction. # ### Attributs privés, cachés # # mais en fait non, # # mais pas touche class CM2(): def __len__(self): return 28 def __gt__(self, other): return len(self) > len(other) c = CM2() # ### Propriétés # # Relier des attributs entre eux. # + import math class Square(object): """Legitimate use of properties""" def __init__(self, side_length): self.side_length = side_length def _calc_diagonal(self): return math.sqrt(2) * self.side_length # méthode la plus simple diagonal = property(fget=_calc_diagonal) # avec des décorateurs @property def surface(self): return self.side_length ** 2 @surface.setter def surface(self, surface): self.side_length = math.sqrt(surface) # - Square(10).diagonal four = Square(4) print(four.side_length) four.surface = 4 print(four.side_length) four.diagonal = 4 # Les attributs lecture seule. # ### Les protocoles et le ducktyping len(c) c > c # #### Les protocoles sont la meilleure façon d'expliquer le pourquoi et le comment # # (en tout cas, que j'ai trouvée) c.__getattribute__ # Là, tout s'éclaire ! # ![lumiere](lightbulb.png) # # Conclusion # # ### Les protocoles : l'explication qui fonctionne le mieux, selon moi # # ```python # iterable # callable # ``` # # ### Python, des fruits délicieux à cueillir avec discernement # # ![„<NAME>“ von <NAME>, BenHur - Eigenes Werk. 
Lizenziert unter CC BY-SA 3.0 über Wikimedia Commons - https://commons.wikimedia.org/wiki/File:Frucht_der_Edelkastanie.jpg#/media/File:Frucht_der_Edelkastanie.jpg](bogue.jpg) # # ### Un conseil (qui ne coûte rien), pour la route ? # # Soyez lisible et modifiable, ne pérénisez pas votre emploi de boguiste, car ça ne dure de toutes façons qu'une saison ! # # ```python # range(5)[::-1] # ``` # # ### Il reste des points durs # # ```python # from java import logging as ⛾ # from libc import os # ``` # # # # ![boguiste](boguiste.png) # # (et pas de caractères bizarres dans le code, non plus ;-)
conf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- my_data = self.my_data_object title = "Percentage of Total Trades Done with China" df = my_data.get_Chinadata_by_country() df = df.rename(columns={'TradePctGDPChange': 'Trade/GDP ratio change'}) # GDP growth correlation china_gdp_df = df[df['Country'] == 'China'][['Country', 'GDP Growth Pct']].reset_index(drop = True) other_gdp_df = df[df['Country'] != 'China'][['Country', 'GDP Growth Pct']] other_gdp_df = other_gdp_df.drop_duplicates().reset_index(drop = True) country_list = df[df['Country'] !='China']['Country'].unique() num_country_per_line = math.ceil(len(country_list)/3.0) gdp_correl = {} for country in country_list: gdp_correl[country] = china_gdp_df['GDP Growth Pct'].corr( other_gdp_df[other_gdp_df['Country']==country]['GDP Growth Pct'].reset_index(drop = True)) gdp_correl_df = pd.DataFrame(gdp_correl.items(), columns=['Country', 'GDPcorrel_w_China']) df = df.merge(gdp_correl_df, on = 'Country', how = 'left') # Slider filter year_slider = alt.binding_range(min=2014, max=2020, step=1) slider_selection = alt.selection_single(bind=year_slider, fields=['Year'], name="Year", init={'Year': 2020}) # Pie charts base = alt.Chart(df).encode( theta=alt.Theta(field="total_trade", type="quantitative"), color=alt.Color(field="isChinaPartner", type="nominal", scale = alt.Scale(domain = ['Trades with China', 'GDP Growth Pct', 'Trades with Others', 'Trade/GDP ratio change'], range = ['#265499', '#AFD097', '#2899CC', '#EEBC59']), #'#2f6684', '#ff7c43', '#acc8df', '#665191' legend = alt.Legend(title="Key")), tooltip=alt.Tooltip('total_trade', format="$,.0f") ) chart1 = alt.hconcat() for country in country_list[0:num_country_per_line]: base_pie = base.transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_arc(outerRadius=(width/35)) 
base_text = base.transform_calculate( PercentOfTotal="datum.total_trade / datum.total_toWorld_trade" ).transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_text(radius=(width/30+10), size=12).encode( text=alt.Text("PercentOfTotal:Q", format='.1%') ) chart1 |= (base_pie+base_text).add_selection( slider_selection ).transform_filter( slider_selection ).properties(title=country,width=(width/8),height=(height/10+30)) chart2 = alt.hconcat() for country in country_list[num_country_per_line:num_country_per_line*2]: base_pie = base.transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_arc(outerRadius=(width/35)) base_text = base.transform_calculate( PercentOfTotal="datum.total_trade / datum.total_toWorld_trade" ).transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_text(radius=(width/30+10), size=12).encode( text=alt.Text("PercentOfTotal:Q", format='.1%') ) chart2 |= (base_pie+base_text).add_selection( slider_selection ).transform_filter( slider_selection ).properties(title=country,width=(width/8),height=(height/10+30)) chart3 = alt.hconcat() for country in country_list[num_country_per_line*2:]: base_pie = base.transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_arc(outerRadius=(width/35)) base_text = base.transform_calculate( PercentOfTotal="datum.total_trade / datum.total_toWorld_trade" ).transform_filter( alt.FieldEqualPredicate(field='Country', equal=country) ).mark_text(radius=(width/30+10), size=12).encode( text=alt.Text("PercentOfTotal:Q", format='.1%') ) chart3 |= (base_pie+base_text).add_selection( slider_selection ).transform_filter( slider_selection ).properties(title=country,width=(width/8),height=(height/10+30)) # brush selection brush_selection = alt.selection_single(fields=['Country'], empty='none') # line charts dependency_bars = alt.Chart(df).mark_bar(opacity = 0.9, color = '#265499', size = 30).encode( x = alt.Y('Country:N', sort='-y', 
axis=alt.Axis(labelAngle=-30, labelOverlap=False, labelFontSize=12, labelFontWeight = 'bold')), y = 'PercentOfTotal:Q' ).transform_calculate( PercentOfTotal="datum.total_trade / datum.total_toWorld_trade" ) dependency_text = dependency_bars.mark_text(dy = -10).encode( x = alt.Y('Country:N', sort='-y', axis = None), y = 'PercentOfTotal:Q', text=alt.Text("PercentOfTotal:Q", format='.1%') ) dependency_chart = alt.layer(dependency_bars, dependency_text).transform_filter( alt.FieldEqualPredicate(field='Year', equal=2020) ).transform_filter( alt.FieldEqualPredicate(field='isChinaPartner', equal='Trades with China') ).resolve_scale( x = 'independent' ).add_selection( brush_selection ).properties( title="Click a country to see how its economy growth is associated to its trade growth", width=(width*0.75),height=(height/10) ) # Correlation fact corr_text = alt.Chart(df).mark_text(size = 40).encode( text=alt.Text("GDPcorrel_w_China:Q", format='.1%') ).transform_filter( brush_selection ).transform_filter( alt.FieldEqualPredicate(field='Year', equal=2020) ).properties( title="GDP Growth Correlation with China", width=(width*0.25),height=(height/10) ) # ruler selection nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Year'], empty='none') gdp_base = alt.Chart(df).transform_fold( ['Trade/GDP ratio change', 'GDP Growth Pct'] ) gdp_line = gdp_base.mark_line().encode( x = alt.X('Year:O',axis=alt.Axis(labelAngle=0)), y = alt.Y('value:Q',axis=alt.Axis(title = 'YoY Growth %', format='.1f')), color = 'key:N', tooltip=[alt.Tooltip('Year'), alt.Tooltip('Trade/GDP ratio change', format=".2f"), alt.Tooltip('GDP Growth Pct', format=".2f")] ).transform_filter( brush_selection ) selectors = gdp_base.mark_point().encode( x=alt.X('Year:O',axis=alt.Axis(labelAngle=0)), opacity=alt.value(0), ).add_selection( nearest ) #points = gdp_line.mark_point().encode( # opacity=alt.condition(nearest, alt.value(1), alt.value(0)) #) points = gdp_line.mark_point( opacity=0.0, 
size=3000 ).encode( x=alt.X('year:O',axis=None), y=alt.Y('value:Q',axis=None), tooltip=[alt.Tooltip('year'), alt.Tooltip('Trade/GDP ratio change', format=".2f"), alt.Tooltip('GDP Growth Pct', format=".2f")] ) # Draw text labels near the points, and highlight based on selection #text = gdp_line.mark_text(align='left', dx=5, dy=-5).encode( # text=alt.condition(nearest, 'value:Q', alt.value(' ')) #) #text = gdp_base.mark_text(align='center', dx=5, dy=-5).encode( # x=alt.X('year:O',axis=None), # y=alt.Y('value:Q',axis=None), # text=alt.Text('value:Q', format='.1f'), # color = 'key:N' #).transform_filter( # brush_selection #) # Draw a rule at the location of the selection rules = gdp_base.mark_rule(color='gray').encode( x='Year:Q', ).transform_filter( nearest ) gdp_combine = alt.layer( gdp_line#, text #points, selectors,rules, ).properties( title="GDP and Trade/GDP ratio YoY Growth Percentage" ,width=width,height=(height*3/5-140) ) return_chart = (chart1 & chart2 & chart3 & (dependency_chart | corr_text) & gdp_combine).configure_title( baseline="line-top", dy = -5 ) a= (1,3) a[1] a[0]
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import csv input_file = open('output.txt', 'r') output_file = open('h2_mycsv_EX.csv', 'w') for line in input_file: (a, date, time, lon) = line.strip().split(' ') output_file.write(','.join([a, date, time, lon]) + '\n') input_file.close() output_file.close() # -
Extra_Data_Analysis/TextToCsv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [Project title - should be precise and concise] # *[Author names]* # ### Abstract # # [What did you do in a nutshell? Question – Method(s) – Results] [Lorem ipsum dolor sit amet, ea ius suas oblique definitiones, in qui nullam possit conclusionemque. Sed et bonorum theophrastus. Purto euismod qualisque id eos, congue audiam omittantur cu his. At augue impedit per. Cu intellegam interesset pri. Ius omnes senserit eu, falli mediocrem mei at, scripserit repudiandae nam ad. Sint sanctus vis ex, ancillae maiestatis et usu, omnes dicunt qui ne. His ei mazim voluptua scripserit, hinc adhuc quidam has in, per cu ipsum offendit consulatu. In doctus iriure pro, pro eu illud omnium. Corpora corrumpit philosophia at nec, no mel vide ponderum. Molestie consetetur ex sit, ignota invidunt instructior est id, nisl commodo omittantur id vel.] # ### Introduction # # [What is the question ?] [Lorem ipsum dolor sit amet, ea ius suas oblique definitiones, in qui nullam possit conclusionemque. Sed et bonorum theophrastus. Purto euismod qualisque id eos, congue audiam omittantur cu his. At augue impedit per. Cu intellegam interesset pri. Ius omnes senserit eu, falli mediocrem mei at, scripserit repudiandae nam ad. Sint sanctus vis ex, ancillae maiestatis et usu, omnes dicunt qui ne. His ei mazim voluptua scripserit, hinc adhuc quidam has in, per cu ipsum offendit consulatu. In doctus iriure pro, pro eu illud omnium. # # Corpora corrumpit philosophia at nec, no mel vide ponderum. Molestie consetetur ex sit, ignota invidunt instructior est id, nisl commodo omittantur id vel. Vero vidit dolorem ei cum, eam suas inimicus postulant an. His ea quod putant sadipscing. Harum recusabo ut mea, ius in soleat sadipscing, nullam vocibus his ei. 
Quo at nulla eruditi definiebas, est ad iusto patrioque accommodare, et odio porro vix. Eu quod adhuc hendrerit eam. Ea has dolorum volumus, at singulis indoctum mei. Sit tollit praesent salutatus eu, maluisset imperdiet mei an. Dicta democritum cu eum, eu nam offendit expetenda scripserit. Sed ut aliquip urbanitas honestatis, per nulla gubergren ne, duo no inermis denique torquatos. Ad facilisis efficiantur intellegebat cum, duis audire intellegebat nec at. Duis quando cotidieque et eam, eu quod duis justo per. Tota altera adipisci no est, gloriatur democritum nam et. Sed clita recusabo te, pri fabellas pertinacia moderatius no, per ne impedit nominati argumentum. Civibus consetetur usu et, duo in munere oporteat.] # # ### Results # # [Describe for each step in a few words what you are doing.] # + # your code # try to annotate the code - add comments and explanations # - # [Comment and explain your results.] # [Figures should come with a caption providing details of what is shown.] # more code # [More explanations.] # even more code ... # ### Discussion # [What does it mean?] [Ad facilisis efficiantur intellegebat cum, duis audire intellegebat nec at. Corpora corrumpit philosophia at nec, no mel vide ponderum. Molestie consetetur ex sit, ignota invidunt instructior est id, nisl commodo omittantur id vel. Vero vidit dolorem ei cum, eam suas inimicus postulant an. His ea quod putant sadipscing. Harum recusabo ut mea, ius in soleat sadipscing, nullam vocibus his ei. Quo at nulla eruditi definiebas, est ad iusto patrioque accommodare, et odio porro vix. Eu quod adhuc hendrerit eam. Ea has dolorum volumus, at singulis indoctum mei. Sit tollit praesent salutatus eu, maluisset imperdiet mei an. Dicta democritum cu eum, eu nam offendit expetenda scripserit. Sed ut aliquip urbanitas honestatis, per nulla gubergren ne, duo no inermis denique torquatos. Ad facilisis efficiantur intellegebat cum, duis audire intellegebat nec at. 
Ad facilisis efficiantur intellegebat cum, duis audire intellegebat nec at. Corpora corrumpit philosophia at nec, no mel vide ponderum. Thank you # # Molestie consetetur ex sit, ignota invidunt instructior est id, nisl commodo omittantur id vel. Vero vidit dolorem ei cum, eam suas inimicus postulant an. His ea quod putant sadipscing. Harum recusabo ut mea, ius in soleat sadipscing, nullam vocibus his ei. Quo at nulla eruditi definiebas, est ad iusto patrioque accommodare, et odio porro vix. Eu quod adhucThank you hendrerit eam. Ea has dolorum volumus, at singulis indoctum mei. Sit tollit praesent salutatus eu, maluisset imperdiet mei. Molestie consetetur ex sit, ignota invidunt instructior est id, nisl commodo omittantur id vel. Vero vidit dolorem ei cum, eam suas inimicus postulant an. His ea quod putant sadipscing. Harum recusabo ut mea, ius in soleat sadipscing, nullam vocibus his ei. Quo at nulla eruditi definiebas, est ad iusto patrioque accommodare, et odio porro vix. Eu quod adhuc hendrerit eam. Ea has dolorum volumus, at singulis indoctum mei. Sit tollit praesent salutatus eu, maluisset imperdiet mei.] # ### References # # 1. <NAME>. Emotion circuits in the brain. Annu. Rev. Neurosci. 23, 155–184 (2000). # 2. <NAME>., <NAME>. & <NAME>. Temporal specificity of fear conditioning: effects of different conditioned stimulus-unconditioned stimulus intervals on the fear-potentiated startle effect. J. Exp. Psychol. Anim. Behav. Process. 15, 295–310 (1989). # 3. Knippenberg, <NAME>., <NAME>., <NAME>. & <NAME>. Fast, transient cardiac accelerations and decelerations during fear conditioning in rats. Physiol. Behav. 105, 607–612 (2012). #
miscFiles/empty-project-template.ipynb